summaryrefslogtreecommitdiff
path: root/tools/dev
diff options
context:
space:
mode:
Diffstat (limited to 'tools/dev')
-rwxr-xr-xtools/dev/analyze-svnlogs.py193
-rw-r--r--tools/dev/aprerr.txt139
-rw-r--r--tools/dev/benchmarks/RepoPerf/ClearMemory.cpp55
-rw-r--r--tools/dev/benchmarks/RepoPerf/TimeWin.cpp118
-rw-r--r--tools/dev/benchmarks/RepoPerf/copy_repo.py313
-rw-r--r--tools/dev/benchmarks/RepoPerf/win_repo_bench.py268
-rwxr-xr-xtools/dev/benchmarks/large_dirs/create_bigdir.sh232
-rwxr-xr-xtools/dev/benchmarks/suite1/benchmark.py1309
-rwxr-xr-xtools/dev/benchmarks/suite1/cronjob102
-rw-r--r--tools/dev/benchmarks/suite1/crontab.entry5
-rwxr-xr-xtools/dev/benchmarks/suite1/generate_charts60
-rwxr-xr-xtools/dev/benchmarks/suite1/run145
-rw-r--r--tools/dev/benchmarks/suite1/run.bat105
-rwxr-xr-xtools/dev/build-svn-deps-win.pl919
-rwxr-xr-xtools/dev/check-license.py142
-rwxr-xr-xtools/dev/contribulyze.py767
-rwxr-xr-xtools/dev/datecheck.py102
-rwxr-xr-xtools/dev/find-bad-style.py57
-rwxr-xr-xtools/dev/find-control-statements.py178
-rwxr-xr-xtools/dev/find-unmoved-deprecated.sh36
-rw-r--r--tools/dev/fsfs-access-map.c794
-rw-r--r--tools/dev/gdb-py/README29
-rw-r--r--tools/dev/gdb-py/svndbg/__init__.py0
-rw-r--r--tools/dev/gdb-py/svndbg/printers.py417
-rwxr-xr-xtools/dev/gen-javahl-errors.py86
-rwxr-xr-xtools/dev/gen-py-errors.py109
-rwxr-xr-xtools/dev/gen_junit_report.py301
-rwxr-xr-xtools/dev/gnuify-changelog.pl164
-rwxr-xr-xtools/dev/graph-dav-servers.py194
-rwxr-xr-xtools/dev/histogram.py54
-rw-r--r--tools/dev/iz/defect.dem6
-rwxr-xr-xtools/dev/iz/ff2csv.command27
-rwxr-xr-xtools/dev/iz/ff2csv.py189
-rwxr-xr-xtools/dev/iz/find-fix.py454
-rwxr-xr-xtools/dev/iz/run-queries.sh62
-rwxr-xr-xtools/dev/lock-check.py114
-rwxr-xr-xtools/dev/log_revnum_change_asf.py97
-rwxr-xr-xtools/dev/merge-graph.py58
-rw-r--r--tools/dev/mergegraph/__init__.py20
-rw-r--r--tools/dev/mergegraph/mergegraph.py313
-rw-r--r--tools/dev/mergegraph/save_as_sh.py137
-rwxr-xr-xtools/dev/min-includes.sh80
-rwxr-xr-xtools/dev/mklog.py49
-rwxr-xr-xtools/dev/mlpatch.py167
-rwxr-xr-xtools/dev/normalize-dump.py137
-rwxr-xr-xtools/dev/po-merge.py197
-rwxr-xr-xtools/dev/prebuild-cleanup.sh45
-rwxr-xr-xtools/dev/random-commits.py50
-rwxr-xr-xtools/dev/remove-trailing-whitespace.sh24
-rwxr-xr-xtools/dev/sbox-ospath.py64
-rwxr-xr-xtools/dev/scramble-tree.py304
-rwxr-xr-xtools/dev/stress.pl498
-rw-r--r--tools/dev/svn-dev.el566
-rw-r--r--tools/dev/svn-dev.vim76
-rw-r--r--tools/dev/svn-entries.el156
-rwxr-xr-xtools/dev/svn-merge-revs.py122
-rw-r--r--tools/dev/svnmover/linenoise/LICENSE25
-rw-r--r--tools/dev/svnmover/linenoise/README.markdown52
-rw-r--r--tools/dev/svnmover/linenoise/linenoise.c1112
-rw-r--r--tools/dev/svnmover/linenoise/linenoise.h66
-rw-r--r--tools/dev/svnmover/merge3.c1399
-rw-r--r--tools/dev/svnmover/ra.c586
-rw-r--r--tools/dev/svnmover/scanlog.c517
-rw-r--r--tools/dev/svnmover/svnmover.c4759
-rw-r--r--tools/dev/svnmover/svnmover.h295
-rw-r--r--tools/dev/svnmover/util.c59
-rwxr-xr-xtools/dev/svnqlite3-dump50
-rw-r--r--tools/dev/svnraisetreeconflict/svnraisetreeconflict.c415
-rwxr-xr-xtools/dev/trails.py229
-rw-r--r--tools/dev/unix-build/Makefile.svn2112
-rw-r--r--tools/dev/unix-build/README96
-rwxr-xr-xtools/dev/verify-history.py97
-rwxr-xr-xtools/dev/warn-ignored-err.sh83
-rwxr-xr-xtools/dev/wc-format.py64
-rwxr-xr-xtools/dev/wc-ng/bump-to-19.py357
-rwxr-xr-xtools/dev/wc-ng/count-progress.py117
-rwxr-xr-xtools/dev/wc-ng/gather-data.sh78
-rwxr-xr-xtools/dev/wc-ng/graph-data.py70
-rwxr-xr-xtools/dev/wc-ng/populate-pristine.py108
-rw-r--r--tools/dev/wc-ng/svn-wc-db-tester.c269
-rwxr-xr-xtools/dev/which-error.py142
-rw-r--r--tools/dev/windows-build/Makefile155
-rw-r--r--tools/dev/windows-build/README22
-rw-r--r--tools/dev/windows-build/document-version.pl48
-rw-r--r--tools/dev/x509-parser.c179
85 files changed, 24966 insertions, 0 deletions
diff --git a/tools/dev/analyze-svnlogs.py b/tools/dev/analyze-svnlogs.py
new file mode 100755
index 0000000..883b413
--- /dev/null
+++ b/tools/dev/analyze-svnlogs.py
@@ -0,0 +1,193 @@
+#!/usr/bin/env python
+#
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements. See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership. The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied. See the License for the
+# specific language governing permissions and limitations
+# under the License.
+#
+#
+#
+# Generate a report of each area each committer has touched over all time.
+#
+# $ svn log -v ^/ > svnlogdata
+# $ ./analyze-svnlogs.py < svnlogdata > report.txt
+#
+# NOTE: ./logdata.py is written with a cached version of the data extracted
+# from 'svnlogdata'. That data can be analyzed in many ways, beyond
+# what this script is reporting.
+#
+
+import sys
+import re
+
+
# Matches the header line of one entry in "svn log -v" output, e.g.
#   "r12345 | jrandom | 2010-01-01 ... | 3 lines"
# Groups: 1 = revision (with 'r' prefix), 2 = author, 3 = date,
#         4 = length of the log message in lines.
# Raw strings: '\|' in a plain string is an invalid escape sequence and
# warns on Python 3.6+.
RE_LOG_HEADER = re.compile(r'^(r[0-9]+) '
                           r'\| ([^|]+) '
                           r'\| ([^|]+) '
                           r'\| ([0-9]+) line')
# Matches one changed-path line; group 1 is the path, group 2 the
# optional " (from ...)" copy-source suffix.
RE_PATH = re.compile(r' [MARD] (.*?)( \(from .*\))?$')
# 'svn log' delimits entries with a line of 72 dashes.
SEPARATOR = '-' * 72
+
+
def parse_one_commit(logfile):
  """Parse a single log entry from LOGFILE ('svn log -v' output).

  Returns (author, set_of_changed_paths) for the entry, or (None, None)
  once end of input is reached.  Raises ParseError when the stream does
  not follow the expected format (e.g. the log was produced without -v).
  """
  # Every entry is preceded by the dashed separator line.
  line = logfile.readline().strip()
  if line != SEPARATOR:
    raise ParseError('missing separator: %s' % line)

  line = logfile.readline()
  if not line:
    # end of file!
    return None, None

  # Header: "rNNN | AUTHOR | DATE | N lines"
  m = RE_LOG_HEADER.match(line)
  if not m:
    raise ParseError('could not match log header')
  revision = m.group(1)
  author = m.group(2)
  num_lines = int(m.group(4))
  paths = set()

  # skip "Changed paths:"
  line = logfile.readline().strip()
  if not line:
    # there were no paths. just a blank before the log message. continue on.
    sys.stderr.write('Funny revision: %s\n' % revision)
  else:
    if not line.startswith('Changed'):
      raise ParseError('log not run with -v. paths missing in %s' % revision)

    # gather all the affected paths
    while 1:
      line = logfile.readline().rstrip()
      if not line:
        # just hit end of the changed paths
        break
      m = RE_PATH.match(line)
      if not m:
        raise ParseError('bad path in %s: %s' % (revision, line))
      paths.add(m.group(1))

  # suck up the log message (its length was given in the header)
  for i in range(num_lines):
    logfile.readline()

  return author, paths
+
+
def parse_file(logfile):
  """Parse all of LOGFILE, returning a map of author -> set of paths."""
  authors = { }

  while True:
    author, paths = parse_one_commit(logfile)
    if author is None:
      # end of input
      break

    # Merge this commit's paths into the author's accumulated set.
    existing = authors.get(author)
    authors[author] = paths if existing is None else existing.union(paths)

  return authors
+
+
def write_logdata(authors):
  """Cache AUTHORS (author -> set of paths) as an importable ./logdata.py.

  The cache is picked up by run() on the next invocation, avoiding a
  re-parse of the (large) svn log output.
  """
  # 'with' guarantees the file is flushed and closed even on error
  # (the original leaked the file handle).
  with open('logdata.py', 'w') as out:
    out.write('authors = {\n')
    for author, paths in authors.items():
      out.write(" '%s': set([\n" % author)
      for path in paths:
        out.write(' %s,\n' % repr(path))
      out.write(' ]),\n')
    out.write('}\n')
+
+
def get_key(sectionroots, path):
  """Map PATH to a tuple used to group paths for the report.

  If PATH lies under one of the SECTIONROOTS prefixes, the key is the
  section's components plus one extra path element below it; otherwise
  the key is just the first two path components.  PATH is expected to
  start with '/'.
  """
  for section in sectionroots:
    if path.startswith(section):
      # add one path element below the top section to the key.
      elmts = len(section.split('/')) + 1
      # strip first element (always empty because path starts with '/').
      # (Early return replaces the original's 'key == None' sentinel
      # check, which also used '==' where 'is' is the correct idiom.)
      return tuple(path.split('/', elmts)[1:elmts])

  # Default: first two components, empty leading element stripped.
  return tuple(path.split('/', 3)[1:3])
+
+
def print_report(authors, sectionroots=[ ]):
  """Print, per author, the sections they touched and how many paths each.

  AUTHORS maps author name to the set of paths they changed.
  SECTIONROOTS lists prefixes to be broken down one level deeper than
  the default two-component grouping (see get_key).  Tags and branches
  are summarized by name instead of being listed individually.
  NOTE(review): the mutable default argument is never modified here, so
  it is harmless, but a tuple default would be safer.
  """
  for author, paths in sorted(authors.items()):
    # Count how many of this author's paths fall under each key.
    topdirs = { }
    for path in paths:
      key = get_key(sectionroots, path)
      if key in topdirs:
        topdirs[key] += 1
      else:
        topdirs[key] = 1

    print(author)
    tags = [ ]
    branches = [ ]
    for topdir in sorted(topdirs):
      if len(topdir) == 1:
        # Single-component key: a file directly in the repository root;
        # such a path can only ever be counted once.
        assert topdirs[topdir] == 1
        print(' %s (ROOT)' % topdir[0])
      else:
        if topdir[0] == 'tags':
          if not topdir[1] in tags:
            tags.append(topdir[1])
        elif topdir[0] == 'branches':
          if not topdir[1] in branches:
            branches.append(topdir[1])
        else:
          print(' %s (%d items)' % ('/'.join(topdir), topdirs[topdir]))
    # Tags/branches are collapsed into one summary line each.
    if tags:
      print(' TAGS: %s' % ', '.join(tags))
    if branches:
      print(' BRANCHES: %s' % ', '.join(branches))

    print('')
+
+
def run(logfile):
  """Load author data and print the report.

  Tries a previously written ./logdata.py cache first; on a cache miss,
  parses LOGFILE and writes the cache for future runs.
  """
  try:
    import logdata
    authors = logdata.authors
  except ImportError:
    authors = parse_file(logfile)
    write_logdata(authors)

  # Sections whose children should be reported one level deeper,
  # most-specific prefix first (get_key stops at the first match).
  sectionroots = [
    '/trunk/subversion/include/private',
    '/trunk/subversion/include',
    '/trunk/subversion/tests',
    '/trunk/subversion',
    '/trunk/tools',
    '/trunk/contrib',
    '/trunk/doc',
  ]  # (dropped the stray trailing ';' from the original)
  print_report(authors, sectionroots)
+
+
class ParseError(Exception):
  """Raised when the input does not look like 'svn log -v' output."""
  pass
+
+
if __name__ == '__main__':
  # Read the log from the file named as the only argument, else stdin.
  # 'with' closes the file we opened (the original leaked the handle);
  # stdin is intentionally left open.
  if len(sys.argv) > 1:
    with open(sys.argv[1]) as logfile:
      run(logfile)
  else:
    run(sys.stdin)
diff --git a/tools/dev/aprerr.txt b/tools/dev/aprerr.txt
new file mode 100644
index 0000000..281c424
--- /dev/null
+++ b/tools/dev/aprerr.txt
@@ -0,0 +1,139 @@
+# This file is used by which-error.py and gen_base.py:write_errno_table()
+APR_SUCCESS = 0
+SOCBASEERR = 10000
+SOCEPERM = 10001
+SOCESRCH = 10003
+SOCEINTR = 10004
+SOCENXIO = 10006
+SOCEBADF = 10009
+SOCEACCES = 10013
+SOCEFAULT = 10014
+SOCEINVAL = 10022
+SOCEMFILE = 10024
+SOCEPIPE = 10032
+SOCEWOULDBLOCK = 10035
+SOCEINPROGRESS = 10036
+SOCEALREADY = 10037
+SOCENOTSOCK = 10038
+SOCEDESTADDRREQ = 10039
+SOCEMSGSIZE = 10040
+SOCEPROTOTYPE = 10041
+SOCENOPROTOOPT = 10042
+SOCEPROTONOSUPPORT = 10043
+SOCESOCKTNOSUPPORT = 10044
+SOCEOPNOTSUPP = 10045
+SOCEPFNOSUPPORT = 10046
+SOCEAFNOSUPPORT = 10047
+SOCEADDRINUSE = 10048
+SOCEADDRNOTAVAIL = 10049
+SOCENETDOWN = 10050
+SOCENETUNREACH = 10051
+SOCENETRESET = 10052
+SOCECONNABORTED = 10053
+SOCECONNRESET = 10054
+SOCENOBUFS = 10055
+SOCEISCONN = 10056
+SOCENOTCONN = 10057
+SOCESHUTDOWN = 10058
+SOCETOOMANYREFS = 10059
+SOCETIMEDOUT = 10060
+SOCECONNREFUSED = 10061
+SOCELOOP = 10062
+SOCENAMETOOLONG = 10063
+SOCEHOSTDOWN = 10064
+SOCEHOSTUNREACH = 10065
+SOCENOTEMPTY = 10066
+APR_UTIL_ERRSPACE_SIZE = 20000
+APR_OS_START_ERROR = 20000
+APR_ENOSTAT = 20001
+APR_ENOPOOL = 20002
+APR_EBADDATE = 20004
+APR_EINVALSOCK = 20005
+APR_ENOPROC = 20006
+APR_ENOTIME = 20007
+APR_ENODIR = 20008
+APR_ENOLOCK = 20009
+APR_ENOPOLL = 20010
+APR_ENOSOCKET = 20011
+APR_ENOTHREAD = 20012
+APR_ENOTHDKEY = 20013
+APR_EGENERAL = 20014
+APR_ENOSHMAVAIL = 20015
+APR_EBADIP = 20016
+APR_EBADMASK = 20017
+APR_EDSOOPEN = 20019
+APR_EABSOLUTE = 20020
+APR_ERELATIVE = 20021
+APR_EINCOMPLETE = 20022
+APR_EABOVEROOT = 20023
+APR_EBADPATH = 20024
+APR_EPATHWILD = 20025
+APR_ESYMNOTFOUND = 20026
+APR_EPROC_UNKNOWN = 20027
+APR_ENOTENOUGHENTROPY = 20028
+APR_OS_ERRSPACE_SIZE = 50000
+APR_OS_START_STATUS = 70000
+APR_INCHILD = 70001
+APR_INPARENT = 70002
+APR_DETACH = 70003
+APR_NOTDETACH = 70004
+APR_CHILD_DONE = 70005
+APR_CHILD_NOTDONE = 70006
+APR_TIMEUP = 70007
+APR_INCOMPLETE = 70008
+APR_BADCH = 70012
+APR_BADARG = 70013
+APR_EOF = 70014
+APR_NOTFOUND = 70015
+APR_ANONYMOUS = 70019
+APR_FILEBASED = 70020
+APR_KEYBASED = 70021
+APR_EINIT = 70022
+APR_ENOTIMPL = 70023
+APR_EMISMATCH = 70024
+APR_EBUSY = 70025
+APR_UTIL_START_STATUS = 100000
+APR_ENOKEY = 100001
+APR_ENOIV = 100002
+APR_EKEYTYPE = 100003
+APR_ENOSPACE = 100004
+APR_ECRYPT = 100005
+APR_EPADDING = 100006
+APR_EKEYLENGTH = 100007
+APR_ENOCIPHER = 100008
+APR_ENODIGEST = 100009
+APR_ENOENGINE = 100010
+APR_EINITENGINE = 100011
+APR_EREINIT = 100012
+APR_OS_START_USEERR = 120000
+APR_OS_START_USERERR = 120000
+APR_OS_START_CANONERR = 620000
+APR_EACCES = 620001
+APR_EEXIST = 620002
+APR_ENAMETOOLONG = 620003
+APR_ENOENT = 620004
+APR_ENOTDIR = 620005
+APR_ENOSPC = 620006
+APR_ENOMEM = 620007
+APR_EMFILE = 620008
+APR_ENFILE = 620009
+APR_EBADF = 620010
+APR_EINVAL = 620011
+APR_ESPIPE = 620012
+APR_EAGAIN = 620013
+APR_EINTR = 620014
+APR_ENOTSOCK = 620015
+APR_ECONNREFUSED = 620016
+APR_EINPROGRESS = 620017
+APR_ECONNABORTED = 620018
+APR_ECONNRESET = 620019
+APR_ETIMEDOUT = 620020
+APR_EHOSTUNREACH = 620021
+APR_ENETUNREACH = 620022
+APR_EFTYPE = 620023
+APR_EPIPE = 620024
+APR_EXDEV = 620025
+APR_ENOTEMPTY = 620026
+APR_EAFNOSUPPORT = 620027
+APR_OS_START_EAIERR = 670000
+APR_OS_START_SYSERR = 720000
diff --git a/tools/dev/benchmarks/RepoPerf/ClearMemory.cpp b/tools/dev/benchmarks/RepoPerf/ClearMemory.cpp
new file mode 100644
index 0000000..06ef6f5
--- /dev/null
+++ b/tools/dev/benchmarks/RepoPerf/ClearMemory.cpp
@@ -0,0 +1,55 @@
+/* ClearMemory.cpp --- A simple Window memory cleaning tool
+ *
+ * ====================================================================
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ * ====================================================================
+ */
+
+#include "targetver.h"
+
+#include <Windows.h>
+
+#include <stdio.h>
+#include <tchar.h>
+
+int _tmain(int argc, _TCHAR* argv[])
+{
+ // Get the current memory usage stats
+ MEMORYSTATUSEX statex;
+ statex.dwLength = sizeof (statex);
+ GlobalMemoryStatusEx(&statex);
+
+ // (Clean) cache memory will be listed under "available".
+ // So, allocate all available RAM, touch it and release it again.
+ unsigned char *memory = new unsigned char[statex.ullAvailPhys];
+ if (memory)
+ {
+ // Make every page dirty.
+ for (DWORDLONG i = 0; i < statex.ullAvailPhys; i += 4096)
+ memory[i]++;
+
+ // Give everything back to the OS.
+ // The in-RAM file read cache is empty now. There may still be bits in
+ // the swap file as well as dirty write buffers. But we don't care
+ // much about these here ...
+ delete memory;
+ }
+
+ return 0;
+}
+
diff --git a/tools/dev/benchmarks/RepoPerf/TimeWin.cpp b/tools/dev/benchmarks/RepoPerf/TimeWin.cpp
new file mode 100644
index 0000000..4acab99
--- /dev/null
+++ b/tools/dev/benchmarks/RepoPerf/TimeWin.cpp
@@ -0,0 +1,118 @@
+/* TimeWin.cpp --- A simple Windows tool inspired by Unix' "time".
+ *
+ * ====================================================================
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ * ====================================================================
+ */
+
+#include "targetver.h"
+
+#include <Windows.h>
+
+#include <stdio.h>
+#include <tchar.h>
+
+void usage()
+{
+ _tprintf(_T("Execute a command, redirect its stdout to NUL and print\n"));
+ _tprintf(_T("execution times ELAPSED\\tUSER\\tKERNEL in seconds.\n"));
+ _tprintf(_T("\n"));
+ _tprintf(_T("Usage: TimeWin.EXE COMMAND [PARAMETERS]\n"));
+}
+
// Return a pointer into TARGV just past the first space-separated token,
// skipping the run of separating spaces.  Returns NULL when TARGV
// contains no space at all (i.e. no arguments follow the program name).
// NOTE(review): not quote-aware -- assumes the program path itself
// contains no spaces; confirm that is acceptable for this tool.
LPCTSTR skip_first_arg(LPCTSTR targv)
{
  LPCTSTR s = _tcschr(targv, ' ');
  while (s && *s == ' ')
    ++s;

  return s;
}
+
+double as_seconds(FILETIME time)
+{
+ return (double)*reinterpret_cast<LONGLONG *>(&time) / 10000000.0;
+}
+
+int _tmain(int argc, LPTSTR argv[])
+{
+ // Minimal CL help support
+ if (argc < 2 || _tcscmp(argv[1], _T("/?")) == 0)
+ {
+ usage();
+ return 0;
+ }
+
+ // Get a file handle for NUL.
+ SECURITY_ATTRIBUTES sa;
+ sa.nLength = sizeof(sa);
+ sa.lpSecurityDescriptor = NULL;
+ sa.bInheritHandle = TRUE;
+
+ HANDLE nul = CreateFile(_T("nul"), FILE_APPEND_DATA, FILE_SHARE_WRITE,
+ &sa, OPEN_ALWAYS, FILE_ATTRIBUTE_NORMAL, NULL);
+
+ // Construct a process startup info that uses the same handles as this
+ // one but redirects stdout to NUL.
+ STARTUPINFO startup_info;
+ GetStartupInfo(&startup_info);
+ startup_info.dwFlags |= STARTF_USESTDHANDLES;
+ startup_info.hStdOutput = nul;
+
+ // Execute the command line.
+ PROCESS_INFORMATION process_info;
+ CreateProcess(NULL, _tscdup(skip_first_arg(GetCommandLine())), NULL, NULL,
+ TRUE, NORMAL_PRIORITY_CLASS, NULL, NULL, &startup_info,
+ &process_info);
+
+ // Get a handle with the needed access rights to the child process.
+ HANDLE child = INVALID_HANDLE_VALUE;
+ DuplicateHandle(GetCurrentProcess(), process_info.hProcess,
+ GetCurrentProcess(), &child,
+ PROCESS_QUERY_INFORMATION | SYNCHRONIZE, FALSE, 0);
+
+ // Wait for the child to finish.
+ // If there was problem earlier (application not found etc.), this will fail.
+ bool success = false;
+ if (WaitForSingleObject(child, INFINITE) == WAIT_OBJECT_0)
+ {
+ // Finally, query the timers and show the result
+ FILETIME start_time, end_time, user_time, kernel_time;
+ if (GetProcessTimes(child, &start_time, &end_time, &kernel_time,
+ &user_time))
+ {
+ _tprintf(_T("%1.3f\t%1.3f\t%1.3f\n"),
+ as_seconds(end_time) - as_seconds(start_time),
+ as_seconds(user_time), as_seconds(kernel_time));
+ success = true;
+ }
+ }
+
+ // In case of failure, give some indication that something went wrong.
+ if (!success)
+ _tprintf(_T("?.???\t?.???f\t?.???\n"),
+
+ // Be good citizens and clean up our mess
+ CloseHandle(child);
+ CloseHandle(process_info.hThread);
+ CloseHandle(process_info.hProcess);
+
+ CloseHandle(nul);
+
+ return 0;
+}
diff --git a/tools/dev/benchmarks/RepoPerf/copy_repo.py b/tools/dev/benchmarks/RepoPerf/copy_repo.py
new file mode 100644
index 0000000..6f40c88
--- /dev/null
+++ b/tools/dev/benchmarks/RepoPerf/copy_repo.py
@@ -0,0 +1,313 @@
+#!/usr/bin/env python
+#
+# copy_repo.py: create multiple, interleaved copies of a set of repositories.
+#
+# Subversion is a tool for revision control.
+# See http://subversion.apache.org for more information.
+#
+# ====================================================================
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements. See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership. The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied. See the License for the
+# specific language governing permissions and limitations
+# under the License.
+######################################################################
+
+# General modules
+import os
+import random
+import shutil
+import sys
+
class Separators:
  """ This class is a container for dummy / filler files.
      It will be used to create spaces between repository
      versions on disk, i.e. to simulate some aspect of
      real-world FS fragmentation.

      It gets initialized with some parent path as well as
      the desired average file size and will create a new
      such file with each call to write().  Automatic
      sharding keeps FS specific overhead at bay.  Call
      cleanup() to eventually delete all dummy files. """

  # Non-NULL content written into the dummy files in 4kB chunks.
  # (bytes literal so the binary-mode write works on Python 3 as well;
  # on Python 2, b"..." is an ordinary str.)
  buffer = b"A" * 4096

  def __init__(self, path, average_size):
    """ Initialize and store all dummy files in a '__tmp'
        sub-folder of PATH.  The size of each dummy file
        is a random value and will be slightly above AVERAGE_SIZE
        kBytes on average.  A value of 0 will effectively
        disable dummy file creation. """

    self.path = os.path.join(path, '__tmp')
    self.size = average_size
    self.count = 0

    # Start from a clean slate.
    if os.path.exists(self.path):
      shutil.rmtree(self.path)

    os.mkdir(self.path)

  def write(self):
    """ Add a new dummy file (no-op if the random size comes out as 0). """

    # Throw dice of a file size.
    # Factor 1024 for kBytes, factor 2 for being an average.
    size = int(float(self.size) * random.random() * 2 * 1024.0)

    # Don't create empty files.  This also implements the
    # "average = 0 means no files" rule.
    if size > 0:
      self.count += 1

      # Create a new shard for every 1000 files.
      # ('//' keeps this an int on Python 3; the original '/' would
      # yield a float and hence a bogus folder name like '0.0'.)
      subfolder = os.path.join(self.path, str(self.count // 1000))
      if not os.path.exists(subfolder):
        os.mkdir(subfolder)

      # Create and write the file in 4k chunks.
      # Writing full chunks will result in average file sizes
      # being slightly above SELF.SIZE.  That's good enough
      # for our purposes.  'with' guarantees the handle is closed.
      with open(os.path.join(subfolder, str(self.count)), "wb") as f:
        while size > 0:
          f.write(self.buffer)
          size -= len(self.buffer)

  def cleanup(self):
    """ Get rid of all the files (and folders) that we created. """

    shutil.rmtree(self.path)
+
class Repository:
  """ Encapsulates key information of a repository.  It is being
      used for copy sources only and contains information about
      its NAME, PATH, SHARD_SIZE, HEAD revision and MIN_UNPACKED_REV. """

  def _read_config(self, filename):
    """ Read and return all lines from FILENAME.
        This will be used to read 'format', 'current' etc. .
        Opened in text mode (was 'rb') so the str-based parsing in
        __init__ works under Python 3 as well; 'with' closes the
        handle deterministically. """

    with open(os.path.join(self.path, 'db', filename), "r") as f:
      return f.readlines()

  def __init__(self, parent, name):
    """ Constructor collecting everything we need to know about
        the repository NAME within PARENT folder. """

    self.name = name
    self.path = os.path.join(parent, name)

    # 'format' line 2 looks like "layout sharded <size>".
    self.shard_size = int(self._read_config('format')[1].split(' ')[2])
    self.min_unpacked_rev = int(self._read_config('min-unpacked-rev')[0])
    self.head = int(self._read_config('current')[0])

  def needs_copy(self, revision):
    """ Return True if REVISION is a revision in this repository
        and is "directly copyable", i.e. is either non-packed or
        the first rev in a packed shard.  Everything else is either
        not a valid rev or already gets / got copied as part of
        some packed shard. """

    if revision > self.head:
      return False
    if revision < self.min_unpacked_rev:
      # Packed range: only the shard's first revision triggers a copy.
      return revision % self.shard_size == 0

    return True

  @classmethod
  def is_repository(cls, path):
    """ Quick check that PATH is (probably) a repository.
        This is mainly to filter out aux files put next to
        (not inside) the repositories to copy. """

    format_path = os.path.join(path, 'db', 'format')
    return os.path.isfile(format_path)
+
class Multicopy:
  """ Helper class doing the actual copying.  It copies individual
      revisions and packed shards from the one source repository
      to multiple copies of it.  The copies have the same name
      as the source repo but with numbers 0 .. N-1 appended to it.

      The copy process is being initiated by the constructor
      (copies the repo skeleton w/o revision contents).  Revision
      contents is then copied by successive calls to the copy()
      method. """

  def _init_copy(self, number):
    """ Called from the constructor, this will copy SELF.SOURCE_REPO
        into copy NUMBER below SELF.DEST_BASE but omit everything
        below db/revs and db/revprops. """

    src = self.source_repo.path
    dst = self.dest_base + str(number)

    # Copy the repo skeleton w/o revs and revprops
    shutil.copytree(src, dst, ignore=shutil.ignore_patterns('revs', 'revprops'))

    # Add (empty) revs and revprops folders for this copy.
    self.dst_revs.append(os.path.join(dst, 'db', 'revs'))
    self.dst_revprops.append(os.path.join(dst, 'db', 'revprops'))

    os.mkdir(self.dst_revs[number])
    os.mkdir(self.dst_revprops[number])

  def _copy_packed_shard(self, shard, number):
    """ Copy packed shard number SHARD from SELF.SOURCE_REPO to
        the copy NUMBER below SELF.DEST_BASE. """

    # Shards are simple subtrees
    src_revs = os.path.join(self.src_revs, str(shard) + '.pack')
    dst_revs = os.path.join(self.dst_revs[number], str(shard) + '.pack')
    src_revprops = os.path.join(self.src_revprops, str(shard) + '.pack')
    dst_revprops = os.path.join(self.dst_revprops[number], str(shard) + '.pack')

    shutil.copytree(src_revs, dst_revs)
    shutil.copytree(src_revprops, dst_revprops)

    # Special case: revprops of rev 0 are never packed => extra copy
    if shard == 0:
      src_revprops = os.path.join(self.src_revprops, '0')
      dest_revprops = os.path.join(self.dst_revprops[number], '0')

      shutil.copytree(src_revprops, dest_revprops)

  def _copy_single_revision(self, revision, number):
    """ Copy non-packed REVISION from SELF.SOURCE_REPO to the copy
        NUMBER below SELF.DEST_BASE. """

    # '//' keeps the shard name an integer on Python 3; the original
    # '/' would produce e.g. '0.0' and break the path lookups.
    shard = str(revision // self.source_repo.shard_size)

    # Auto-create shard folder
    if revision % self.source_repo.shard_size == 0:
      os.mkdir(os.path.join(self.dst_revs[number], shard))
      os.mkdir(os.path.join(self.dst_revprops[number], shard))

    # Copy the rev file and the revprop file
    src_rev = os.path.join(self.src_revs, shard, str(revision))
    dest_rev = os.path.join(self.dst_revs[number], shard, str(revision))
    src_revprop = os.path.join(self.src_revprops, shard, str(revision))
    dest_revprop = os.path.join(self.dst_revprops[number], shard, str(revision))

    shutil.copyfile(src_rev, dest_rev)
    shutil.copyfile(src_revprop, dest_revprop)

  def __init__(self, source, target_parent, count):
    """ Initiate the copy process for the SOURCE repository to
        be copied COUNT times into the TARGET_PARENT directory. """

    self.source_repo = source
    self.dest_base = os.path.join(target_parent, source.name)

    self.src_revs = os.path.join(source.path, 'db', 'revs')
    self.src_revprops = os.path.join(source.path, 'db', 'revprops')

    self.dst_revs = []
    self.dst_revprops = []
    for i in range(0, count):
      self._init_copy(i)

  def copy(self, revision, number):
    """ Copy (packed or non-packed) REVISION from SELF.SOURCE_REPO
        to the copy NUMBER below SELF.DEST_BASE.

        SELF.SOURCE_REPO.needs_copy(REVISION) must be True. """

    if revision < self.source_repo.min_unpacked_rev:
      # Integer division: the shard number must be an int (see above).
      self._copy_packed_shard(revision // self.source_repo.shard_size, number)
    else:
      self._copy_single_revision(revision, number)
+
def copy_repos(src, dst, count, separator_size):
  """ Under DST, create COUNT copies of all repositories immediately
      below SRC.

      All copies will be "interleaved" such that we copy each individual
      revision / packed shard to all target repos first before
      continuing with the next revision / packed shard.  After each
      round (revision / packed shard) insert a temporary file of
      SEPARATOR_SIZE kBytes on average to add more spacing between
      revisions.  The temp files get automatically removed at the end.

      Please note that this function will clear DST before copying
      anything into it. """

  # Remove any remnants from the target folder.
  # (DST gets auto-created by the first repo copy.)
  # Tolerate a not-yet-existing DST instead of raising.
  if os.path.exists(dst):
    shutil.rmtree(dst)

  # Repositories to copy and the respective copy utilities
  repositories = []
  copies = []

  # Find repositories, initiate copies and determine the range of
  # revisions to copy in total
  max_revision = 0
  for name in os.listdir(src):
    if Repository.is_repository(os.path.join(src, name)):
      repository = Repository(src, name)
      repositories.append(repository)
      copies.append(Multicopy(repository, dst, count))

      if repository.head > max_revision:
        max_revision = repository.head

  # Temp file collection (spacers)
  separators = Separators(dst, separator_size)

  # Copy all repos in revision,number-major order
  # (range instead of the Python-2-only xrange)
  for revision in range(0, max_revision + 1):
    for number in range(0, count):

      any_copy = False
      for i in range(0, len(repositories)):
        if repositories[i].needs_copy(revision):
          any_copy = True
          copies[i].copy(revision, number)

      # Don't add spacers when nothing got copied (REVISION is
      # packed in all repositories).
      if any_copy:
        separators.write()

  # Now that all data is in position, remove the spacers
  separators.cleanup()
+
def show_usage():
  """ Print the command-line usage text for this script to stdout. """

  print("Copies and duplicates repositories in a way that mimics larger deployments.")
  print("")
  print("Usage:")
  print("copy_repo.py SRC DST COUNT SEPARATOR_SIZE")
  print("")
  print("SRC Immediate parent folder of all the repositories to copy.")
  print("DST Folder to copy into; current contents will be lost.")
  print("COUNT Number of copies to create of each source repository.")
  print("SEPARATOR_SIZE Additional spacing, in kBytes, between revisions.")
+
# main function: expects exactly four arguments
# (SRC DST COUNT SEPARATOR_SIZE), otherwise prints the usage text.
# Fixed: the original tested 'len(argv)', a NameError -- the module
# only imports 'sys', so it must be 'sys.argv'.
if len(sys.argv) == 5:
  copy_repos(sys.argv[1], sys.argv[2], int(sys.argv[3]), int(sys.argv[4]))
else:
  show_usage()
diff --git a/tools/dev/benchmarks/RepoPerf/win_repo_bench.py b/tools/dev/benchmarks/RepoPerf/win_repo_bench.py
new file mode 100644
index 0000000..b2493d3
--- /dev/null
+++ b/tools/dev/benchmarks/RepoPerf/win_repo_bench.py
@@ -0,0 +1,268 @@
+#!/usr/bin/env python
+#
+# win_repo_bench.py: run repository / server performance tests on Windows.
+#
+# Subversion is a tool for revision control.
+# See http://subversion.apache.org for more information.
+#
+# ====================================================================
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements. See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership. The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied. See the License for the
+# specific language governing permissions and limitations
+# under the License.
+######################################################################
+
+# General modules
+import os
+import shutil
+import sys
+import subprocess
+import time
+
+from win32com.shell import shell, shellcon
+
# Adapt these paths to your needs

# Contains all the REPOSITORIES
repo_parent = "C:\\repos"

# Where to create working copies
wc_path = "C:\\wc"
# Folder the svn.exe / svn-bench.exe / svnadmin.exe / svnserve.exe
# binaries are taken from (see run_cs_command() and start_server() below).
exe_path = "C:\\develop\\Subversion\\trunk\\Release"
# Apache root; bin\httpd.exe and conf\extra\subversion.conf are
# addressed relative to this path (see start_server() / set_config()).
apache_path = "C:\\develop\\Subversion"

# Test these repositories and in this order.
# Actual repository names have numbers 0 .. REPETITIONS-1 appended to them
repositories = ["ruby-f6-nonpacked", "ruby-f7-nonpacked",
                "ruby-f6-packed", "ruby-f7-packed",
                "bsd-f6-nonpacked", "bsd-f7-nonpacked",
                "bsd-f6-packed", "bsd-f7-packed"]

# Basically lists the RA backends to test but as long as all repositories
# can be accessed using any of them, arbitrary URLs are possible.
prefixes = ["svn://localhost/", "http://localhost/svn/", "file:///C:/repos/"]

# Number of times to repeat the tests.  For each iteration, there must be
# a separate copy of all repositories.
repetitions = 3

# Server configurations to test; the 'fast' settings enable the various
# svnserve caches.
configurations = ['slow', 'medium', 'fast']
svnserve_params = {
  'slow':"",
  'medium':"-M 256" ,
  'fast':"-M 1024 -c 0 --cache-revprops yes --block-read yes --client-speed 1000"
}
+
+
def clear_memory():
  """ Flush the in-RAM portion of the file / disk cache by running the
      external ClearMemory helper. """
  command = ["ClearMemory.exe"]
  subprocess.call(command)
+
def start_server(prefix, config):
  """ Depending on the url PREFIX, start the corresponding server with the
      given CONFIGuration.  file: needs no server, and http: / file:
      settings have already been put in place by set_config(). """

  if prefix.startswith("svn:"):
    # Launch svnserve in daemon mode in a detached console window.
    server = os.path.join(exe_path, "svnserve.exe")
    command = ("cmd.exe /c start " + server + " -dr " + repo_parent +
               " " + svnserve_params[config])
    subprocess.call(command)
    time.sleep(2)
  elif prefix.startswith("http:"):
    # Apache is controlled through its own binary.
    httpd = os.path.join(apache_path, 'bin', 'httpd.exe')
    subprocess.call(httpd + " -k start")
    time.sleep(2)
+
def stop_server(prefix):
  """ Depending on the url PREFIX, stop / kill the corresponding server. """

  if prefix.startswith("svn:"):
    # svnserve is simply killed; it holds no persistent state.
    subprocess.call("cmd.exe /c taskkill /im svnserve.exe /f > nul 2>&1")
    time.sleep(1)
  elif prefix.startswith("http:"):
    httpd = os.path.join(apache_path, 'bin', 'httpd.exe')
    subprocess.call(httpd + " -k stop")
    time.sleep(1)
+
def run_cs_command(state, config, repository, prefix, args):
  """ Run the client-side command given in ARGS.  Log the STATE of the
      caches, the CONFIG we are using, the REPOSITORY, the url PREFIX
      and finally the execution times. """

  # Make sure we can create a new working copy if we want to.
  if os.path.exists(wc_path):
    shutil.rmtree(wc_path)

  # Select the client to use: the stripped-down benchmark client for the
  # null-* commands, the full svn client otherwise.
  if ('null-export' in args) or ('null-log' in args):
    exe = os.path.join(exe_path, "svn-bench.exe")
  else:
    exe = os.path.join(exe_path, "svn.exe")

  # Display the operation.  sys.stdout.write() takes exactly one string
  # argument, so the fields must be joined first (the original passed
  # five arguments and raised TypeError).
  repo_title = repository.replace('nonpacked', 'nopack')
  sys.stdout.write(state + "\t" + repo_title + "\t" + prefix + "\t" +
                   config + "\t ")
  sys.stdout.flush()

  # Execute the command; TimeWin prints the execution times.
  subprocess.call(["TimeWin.exe", exe] + args)
+
+
def run_test_cs_sequence(config, repository, run, prefix, command, args):
  """ Run the client-side COMMAND with the given ARGS in various stages
      of cache heat-up.  Execute the test with server CONFIG on REPOSITORY
      with the given url PREFIX. """

  # Build the full URL to use.  Exports operate on the main dev line only.
  url = prefix + repository + str(run)
  if (command == 'export') or (command == 'null-export'):
    if repository[:3] == 'bsd':
      url += '/head'
    else:
      url += '/trunk'

  # Full set of command arguments
  args = [command, url] + args

  # Free up caches best we can.
  clear_memory()

  # Caches are quite cool now and ready to take up new data
  start_server(prefix, config)
  run_cs_command("Cold", config, repository, prefix, args)
  stop_server(prefix)

  # OS caches are quite hot now.
  # Run operation from hot OS caches but cold SVN caches.
  # (Restarting the server resets its in-process SVN caches.)
  start_server(prefix, config)
  run_cs_command("WarmOS", config, repository, prefix, args)
  stop_server(prefix)

  # OS caches may be even hotter now.
  # Run operation from hot OS caches but cold SVN caches.
  start_server(prefix, config)
  run_cs_command("HotOS", config, repository, prefix, args)

  # Keep server process and thus the warmed up SVN caches.
  # Run operation from hot OS and SVN caches.
  run_cs_command("WrmSVN", config, repository, prefix, args)
  run_cs_command("HotSVN", config, repository, prefix, args)
  stop_server(prefix)
+
+
def set_config(config):
  """ Switch configuration files to CONFIG.  This overwrites the client
      config file with config.$CONFIG and the server config file with
      subversion.$CONFIG.conf. """

  # Client side: replace %APPDATA%\Subversion\config with the template.
  appdata = shell.SHGetFolderPath(0, shellcon.CSIDL_APPDATA, None, 0)
  client_folder = os.path.join(appdata, 'Subversion')
  client_config = os.path.join(client_folder, 'config')
  shutil.copyfile(client_config + '.' + config, client_config)

  # Server side: replace Apache's subversion.conf with the template.
  server_folder = os.path.join(apache_path, 'conf', 'extra')
  server_config = os.path.join(server_folder, 'subversion.conf')
  server_template = os.path.join(server_folder,
                                 'subversion.' + config + '.conf')
  shutil.copyfile(server_template, server_config)
+
+
def run_test_cs_configurations(command, args):
  """ Run client COMMAND with basic arguments ARGS in all configurations
      repeatedly with all servers on all repositories. """

  # Section header.  The original used a bare 'print' here, which is a
  # Python 2 statement and prints nothing under Python 3; use the same
  # explicit print("") the other report functions use.
  print("")
  print(command)
  print("")

  for config in configurations:
    set_config(config)
    for prefix in prefixes:
      # These two must be the innermost loops and must be in that order.
      # It gives us the coldest caches and the least temporal favoritism.
      for run in range(0, repetitions):
        for repository in repositories:
          run_test_cs_sequence(config, repository, run, prefix, command, args)
+
def run_admin_command(state, config, repository, args):
  """ Run the svnadmin command given in ARGS.  Log the STATE of the
      caches, the CONFIG we are using, the REPOSITORY and finally
      the execution times. """

  exe = os.path.join(exe_path, "svnadmin.exe")

  # Pass a cache size that matches the selected server configuration.
  if config == 'medium':
    extra = ['-M', '256']
  elif config == 'fast':
    extra = ['-M', '1024']
  else:
    extra = []

  # sys.stdout.write() takes exactly one string argument; join the
  # fields first (the original passed four arguments -> TypeError).
  sys.stdout.write(state + "\t" + repository + "\t" + config + "\t ")
  sys.stdout.flush()
  subprocess.call(["TimeWin.exe", exe] + args + extra)
+
def run_test_admin_sequence(config, repository, run, command, args):
  """ Run the svnadmin COMMAND with the given ARGS in various stages
      of cache heat-up.  Execute the test with server CONFIG on
      REPOSITORY. """

  # Full set of command arguments; svnadmin operates on the local path.
  repo_path = os.path.join(repo_parent, repository + str(run))
  args = [command, repo_path] + args

  # Free up caches best we can.
  clear_memory()

  # svnadmin runs can be quite costly and are usually CPU-bound.
  # Test with "cold" and "hot" CPU caches only.
  for state in ("Cold", "Hot"):
    run_admin_command(state, config, repository, args)
+
+
def run_test_admin_configurations(command, args):
  """ Run svnadmin COMMAND with basic arguments ARGS in all configurations
      repeatedly on all repositories. """

  print("")
  print(command)
  print("")

  for config in configurations:
    # These two must be the innermost loops and must be in that order.
    # It gives us the coldest caches and the least temporal favoritism.
    for iteration in range(repetitions):
      for repository in repositories:
        run_test_admin_sequence(config, repository, iteration, command, args)
+
+
def bench():
  """ Run all performance tests.

      Client-side tests first ('log' and 'export' plus their data-
      discarding 'null-*' variants), then the server-side 'dump' test. """

  run_test_cs_configurations('log', ['-v', '--limit', '50000'])
  run_test_cs_configurations('export', [wc_path, '-q'])

  run_test_cs_configurations('null-log', ['-v', '--limit', '50000', '-q'])
  run_test_cs_configurations('null-export', ['-q'])

  run_test_admin_configurations('dump', ['-q'])

# main function.  NOTE(review): there is no 'if __name__ == "__main__"'
# guard, so merely importing this module starts the whole benchmark --
# confirm this is intended.
bench()
diff --git a/tools/dev/benchmarks/large_dirs/create_bigdir.sh b/tools/dev/benchmarks/large_dirs/create_bigdir.sh
new file mode 100755
index 0000000..c2830c8
--- /dev/null
+++ b/tools/dev/benchmarks/large_dirs/create_bigdir.sh
@@ -0,0 +1,232 @@
+#!/bin/sh
+
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements. See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership. The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied. See the License for the
+# specific language governing permissions and limitations
+# under the License.
+
+# usage: run this script from the root of your working copy
+# and / or adjust the path settings below as needed
+
# set SVNPATH to the 'subversion' folder of your SVN source code w/c

SVNPATH="$('pwd')/subversion"

# if using the installed svn, you may need to adapt the following.
# Uncomment the VALGRIND line to use that tool instead of "time".
# Comment the SVNSERVE line to use file:// instead of svn://.

SVN=${SVNPATH}/svn/svn
SVNADMIN=${SVNPATH}/svnadmin/svnadmin
SVNSERVE=${SVNPATH}/svnserve/svnserve
# VALGRIND="valgrind --tool=callgrind"

# set your data paths here
# (both default to tmpfs so disk I/O does not dominate the timings)

WC=/dev/shm/wc
REPOROOT=/dev/shm

# number of items per folder on the first run. It will be doubled
# after every iteration. The test will stop if MAXCOUNT has been
# reached or exceeded (and will not be executed for MAXCOUNT).

FILECOUNT=1
MAXCOUNT=20000

# only 1.7 supports server-side caching and uncompressed data transfer
# (these options are cleared below when an older svnserve is detected)

SERVEROPTS="-c 0 -M 400"

# from here on, we should be good

# 'time' output format: real / user / sys with millisecond precision
TIMEFORMAT='%3R %3U %3S'
REPONAME=dirs
PORT=54321
if [ "${SVNSERVE}" != "" ] ; then
  URL=svn://localhost:$PORT/$REPONAME
else
  URL=file://${REPOROOT}/$REPONAME
fi

# create repository
# (anonymous write access so the test needs no credentials)

rm -rf $WC $REPOROOT/$REPONAME
mkdir $REPOROOT/$REPONAME
${SVNADMIN} create $REPOROOT/$REPONAME
echo "[general]
anon-access = write" > $REPOROOT/$REPONAME/conf/svnserve.conf

# fire up svnserve

if [ "${SVNSERVE}" != "" ] ; then
  # extract the minor version digit from "... version 1.X..." output
  VERSION=$( ${SVNSERVE} --version | grep " version" | sed 's/.*\ 1\.\([0-9]\).*/\1/' )
  if [ "$VERSION" -lt "7" ]; then
    SERVEROPTS=""
  fi

  ${SVNSERVE} -Tdr ${REPOROOT} ${SERVEROPTS} --listen-port ${PORT} --foreground &
  PID=$!
  sleep 1
fi

# construct valgrind parameters

if [ "${VALGRIND}" != "" ] ; then
  # e.g. "--tool=callgrind" -> VG_OUTFILE="--callgrind-out-file"
  VG_TOOL=$( echo ${VALGRIND} | sed 's/.*\ --tool=\([a-z]*\).*/\1/' )
  VG_OUTFILE="--${VG_TOOL}-out-file"
fi

# print header

printf "using "
${SVN} --version | grep " version"
echo

# init working copy

rm -rf $WC
${SVN} co $URL $WC > /dev/null
+
+# helpers
+
get_sequence() {
  # Emit the integers $1..$2, whitespace-separated.  Three equivalent
  # tools are tried in turn since availability differs per platform:
  # BSD jot, GNU/POSIX seq, and a python one-liner as last resort.
  (jot - "$1" "$2" "1" 2>/dev/null || seq -s ' ' "$1" "$2" 2>/dev/null || python -c "for i in range($1,$2+1): print(i)")
}
+
+# functions that execute an SVN command
+
run_svn() {
  # Run "svn $1" on $WC/$2 with optional extra argument $3, measured
  # with 'time' (see TIMEFORMAT above) or traced by valgrind when
  # VALGRIND is set.
  if [ "${VALGRIND}" = "" ] ; then
    time ${SVN} $1 $WC/$2 $3 > /dev/null
  else
    ${VALGRIND} ${VG_OUTFILE}="${VG_TOOL}.out.$1.$2" ${SVN} $1 $WC/$2 $3 > /dev/null
  fi
}
+
run_svn_del() {
  # Delete the single file $2 inside the copied folder ${1}_c, timed
  # (or run under valgrind) like run_svn above.
  if [ "${VALGRIND}" = "" ] ; then
    time ${SVN} del $WC/${1}_c/$2 -q > /dev/null
  else
    ${VALGRIND} ${VG_OUTFILE}="${VG_TOOL}.out.del.$1" ${SVN} del $WC/${1}_c/$2 -q > /dev/null
  fi
}
+
run_svn_del_many() {
  # Delete files 2..$1 from the copied folder ${1}_c with a single
  # "svn del --targets" invocation.
  #
  # Start from an empty targets file.  (The original seeded it with
  # 'printf "\n"', leaving a bogus empty first line in the list.)
  : > files.lst
  sequence=`get_sequence 2 ${1}`
  for i in $sequence; do
    # Literal '%s' format so '%' characters in the path are never
    # interpreted as printf directives.
    printf '%s\n' "$WC/${1}_c/$i" >> files.lst
  done

  if [ "${VALGRIND}" = "" ] ; then
    time ${SVN} del -q --targets files.lst > /dev/null
  else
    ${VALGRIND} ${VG_OUTFILE}="${VG_TOOL}.out.del_many.$1" ${SVN} del -q --targets files.lst > /dev/null
  fi
}
+
run_svn_ci() {
  # Commit $WC/$1 with an empty log message.  $2 only tags the valgrind
  # output file name (e.g. 'add', 'copy', 'del').
  if [ "${VALGRIND}" = "" ] ; then
    time ${SVN} ci $WC/$1 -m "" -q > /dev/null
  else
    ${VALGRIND} ${VG_OUTFILE}="${VG_TOOL}.out.ci_$2.$1" ${SVN} ci $WC/$1 -m "" -q > /dev/null
  fi
}
+
run_svn_cp() {
  # Working-copy copy of $WC/$1 to $WC/$2, timed or run under valgrind.
  if [ "${VALGRIND}" = "" ] ; then
    time ${SVN} cp $WC/$1 $WC/$2 > /dev/null
  else
    ${VALGRIND} ${VG_OUTFILE}="${VG_TOOL}.out.cp.$1" ${SVN} cp $WC/$1 $WC/$2 > /dev/null
  fi
}
+
run_svn_get() {
  # Run "svn $1" (export or co) from $URL into $WC.  $2 only tags the
  # valgrind output file name.
  if [ "${VALGRIND}" = "" ] ; then
    time ${SVN} $1 $URL $WC -q > /dev/null
  else
    ${VALGRIND} ${VG_OUTFILE}="${VG_TOOL}.out.$1.$2" ${SVN} $1 $URL $WC -q > /dev/null
  fi
}
+
+# main loop
+
# Benchmark loop: run every operation on a flat folder of FILECOUNT
# files, then double FILECOUNT and repeat until MAXCOUNT is reached.
while [ $FILECOUNT -lt $MAXCOUNT ]; do
  echo "Processing $FILECOUNT files in the same folder"

  # Create FILECOUNT files in a fresh folder named after the count.
  sequence=`get_sequence 2 $FILECOUNT`
  printf "\tCreating files ... \t real user sys\n"
  mkdir $WC/$FILECOUNT
  for i in 1 $sequence; do
    echo "File number $i" > $WC/$FILECOUNT/$i
  done

  printf "\tAdding files ... \t"
  run_svn add $FILECOUNT -q

  printf "\tRunning status ... \t"
  run_svn st $FILECOUNT -q

  printf "\tCommit files ... \t"
  run_svn_ci $FILECOUNT add

  printf "\tListing files ... \t"
  run_svn ls $FILECOUNT

  printf "\tUpdating files ... \t"
  run_svn up $FILECOUNT -q

  printf "\tLocal copy ... \t"
  run_svn_cp $FILECOUNT ${FILECOUNT}_c

  printf "\tCommit copy ... \t"
  run_svn_ci ${FILECOUNT}_c copy

  printf "\tDelete 1 file ... \t"
  run_svn_del ${FILECOUNT} 1

  printf "\tDeleting files ... \t"
  # POSIX test(1) uses '=' for string comparison; '==' is a bashism
  # that fails under a strict /bin/sh.
  if [ "$FILECOUNT" = "1" ] ; then
    printf " skipped (0 files to delete)\n"
  else
    run_svn_del_many ${FILECOUNT}
  fi

  printf "\tCommit deletions ..\t"
  run_svn_ci ${FILECOUNT}_c del

  rm -rf $WC

  printf "\tExport all ... \t"
  run_svn_get export $FILECOUNT

  rm -rf $WC
  mkdir $WC

  printf "\tCheck out all ... \t"
  run_svn_get co $FILECOUNT

  # Double the file count; POSIX arithmetic expansion avoids the
  # external 'bc' dependency.
  FILECOUNT=$((FILECOUNT * 2))
  echo ""
done
+
+# tear down
+
# Stop the svnserve instance started above; its PID was recorded when
# it was launched in the background.
if [ "${SVNSERVE}" != "" ] ; then
  echo "killing svnserve ... "
  kill $PID
fi
+
diff --git a/tools/dev/benchmarks/suite1/benchmark.py b/tools/dev/benchmarks/suite1/benchmark.py
new file mode 100755
index 0000000..250d1d5
--- /dev/null
+++ b/tools/dev/benchmarks/suite1/benchmark.py
@@ -0,0 +1,1309 @@
+#!/usr/bin/env python
+
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements. See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership. The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied. See the License for the
+# specific language governing permissions and limitations
+# under the License.
+
+"""Usage: benchmark.py run|list|compare|show|chart <selection> ...
+
+SELECTING TIMINGS -- B@R,LxS
+
+In the subcommands below, a timings selection consists of a string with up to
+four elements:
+ <branch>@<revision>,<levels>x<spread>
+abbreviated as:
+ B@R,LxS
+
+<branch> is a label of an svn branch, e.g. "1.7.x".
+<revision> is the last-changed-revision of above branch.
+<levels> is the number of directory levels created in the benchmark.
+<spread> is the number of child trees spreading off each dir level.
+
+<branch_name> and <revision> are simply used for labeling. Upon the actual
+test runs, you should enter labels matching the selected --svn-bin-dir.
+Later, you can select runs individually by using these labels.
+
+For <revision>, you can provide special keywords:
+- 'each' has the same effect as entering each available revision number that
+ is on record in the db in a separate timings selection.
+- 'last' is the same as 'each', but shows only the last 10 revisions. 'last'
+ can be combined with a number, e.g. 'last12'.
+
+For all subcommands except 'run', you can omit some or all of the elements of
+a timings selection to combine all available timings sets. Try that out with
+the 'list' subcommand.
+
+Examples:
+ benchmark.py run 1.7.x@12345,5x5
+ benchmark.py show trunk@12345
+ benchmark.py compare 1.7.0,1x100 trunk@each,1x100
+ benchmark.py chart compare 1.7.0,5x5 trunk@last12,5x5
+
+
+RUN BENCHMARKS
+
+ benchmark.py run B@R,LxS [N] [options]
+
+Test data is added to an sqlite database created automatically, by default
+'benchmark.db' in the current working directory. To specify a different path,
+use option -f <path_to_db>.
+
+If <N> is provided, the run is repeated N times.
+
+<levels> and <spread> control the way the tested working copy is structured:
+ <levels>: number of directory levels to create.
+ <spread>: number of files and subdirectories created in each dir.
+
+
+LIST WHAT IS ON RECORD
+
+ benchmark.py list [B@R,LxS]
+
+Find entries in the database for the given constraints. Any arguments can
+be omitted. (To select only a rev, start with a '@', like '@123'; to select
+only spread, start with an 'x', like "x100".)
+
+Call without arguments to get a listing of all available constraints.
+
+
+COMPARE TIMINGS
+
+ benchmark.py compare B@R,LxS B@R,LxS [B@R,LxS [...]]
+
+Compare any number of timings sets to the first provided set (in text mode).
+For example:
+ benchmark.py compare 1.7.0 trunk@1349903
+ Compare the total timings of all combined '1.7.0' branch runs to
+ all combined runs of 'trunk'-at-revision-1349903.
+ benchmark.py compare 1.7.0,5x5 trunk@1349903,5x5
+ Same as above, but only compare the working copy types with 5 levels
+ and a spread of 5.
+
+Use the -c option to limit comparison to specific command names.
+
+
+SHOW TIMINGS
+
+ benchmark.py show B@R,LxS [B@R,LxS [...]]
+
+Print out a summary of the timings selected from the given constraints.
+
+
+GENERATE CHARTS
+
+ benchmark.py chart compare B@R,LxS B@R,LxS [ B@R,LxS ... ]
+
+Produce a bar chart that compares any number of sets of timings. Like with
+the plain 'compare' command, the first set is taken as a reference point for
+100% and +-0 seconds. Each following dataset produces a set of labeled bar
+charts, grouped by svn command names. At least two timings sets must be
+provided.
+
+Use the -c option to limit comparison to specific command names.
+
+
+EXAMPLES
+
+# Run 3 benchmarks on svn 1.7.0 with 5 dir levels and 5 files and subdirs for
+# each level (spread). Timings are saved in ./benchmark.db.
+# Provide label '1.7.0' and its Last-Changed-Rev for later reference.
+./benchmark.py run --svn-bin-dir ~/svn-prefix/1.7.0/bin 1.7.0@1181106,5x5 3
+
+# Record 3 benchmark runs on trunk, again naming its Last-Changed-Rev.
+# (You may also set your $PATH instead of using --svn-bin-dir.)
+./benchmark.py run --svn-bin-dir ~/svn-prefix/trunk/bin trunk@1352725,5x5 3
+
+# Work with the results of above two runs
+./benchmark.py list
+./benchmark.py compare 1.7.0 trunk
+./benchmark.py show 1.7.0 trunk
+./benchmark.py chart compare 1.7.0 trunk
+./benchmark.py chart compare 1.7.0 trunk -c "update,commit,TOTAL RUN"
+
+# Rebuild r1352598, run it and chart improvements since 1.7.0.
+svn up -r1352598 ~/src/trunk
+make -C ~/src/trunk dist-clean install
+export PATH="$HOME/svn-prefix/trunk/bin:$PATH"
+which svn
+./benchmark.py run trunk@1352598,5x5 3
+./benchmark.py chart compare 1.7.0 trunk@1352598 trunk@1352725 -o chart.svg
+
+
+GLOBAL OPTIONS"""
+
+import os
+import time
+import datetime
+import sqlite3
+import optparse
+import tempfile
+import subprocess
+import random
+import shutil
+import stat
+import string
+from copy import copy
+
# Command-line switches that are probes rather than real operations;
# Run.tic() skips timing for these.
IGNORE_COMMANDS = ('--version', )
# Pseudo command label under which whole-run totals are recorded.
TOTAL_RUN = 'TOTAL RUN'

# Shorthand for os.path.join, used throughout this script.
j = os.path.join
+
def bail(msg=None):
  """ Terminate the program with exit status 1, printing MSG first
      when one is given. """
  if msg:
    print(msg)
  exit(1)
+
def time_str():
  """ The current local time as a 'YYYY-MM-DD hh:mm:ss' string. """
  now = time.localtime()
  return time.strftime('%Y-%m-%d %H:%M:%S', now)
+
def timedelta_to_seconds(td):
  """ Convert datetime.timedelta TD to a float number of seconds. """
  whole_days = td.days * 24 * 60 * 60
  fractional = float(td.microseconds) / (10**6)
  return whole_days + float(td.seconds) + fractional
+
def run_cmd(cmd, stdin=None, shell=False, verbose=False):
  """ Run CMD via subprocess, optionally feeding STDIN to it, and
      return the (stdout, stderr) pair.

      NOTE(review): the command line is echoed when the *global*
      options.verbose flag is set, while the VERBOSE parameter only
      controls echoing of stdout/stderr -- confirm this asymmetry is
      intended and that 'options' is always defined before first use. """
  if options.verbose:
    if shell:
      printable_cmd = cmd
    else:
      printable_cmd = ' '.join(cmd)
    print('CMD:', printable_cmd)

  # Only open a stdin pipe when there is data to feed.
  if stdin:
    stdin_arg = subprocess.PIPE
  else:
    stdin_arg = None

  p = subprocess.Popen(cmd,
                       stdin=stdin_arg,
                       stdout=subprocess.PIPE,
                       stderr=subprocess.PIPE,
                       shell=shell)
  stdout,stderr = p.communicate(input=stdin)

  if verbose:
    if (stdout):
      print("STDOUT: [[[\n%s]]]" % ''.join(stdout))
    if (stderr):
      print("STDERR: [[[\n%s]]]" % ''.join(stderr))

  return stdout, stderr
+
+
# Process-wide counter backing next_unique_basename().
_next_unique_basename_count = 0

def next_unique_basename(prefix):
  """ Return PREFIX extended by a strictly increasing counter,
      e.g. 'file_1', 'file_2', ... """
  global _next_unique_basename_count
  _next_unique_basename_count += 1
  return '%s_%d' % (prefix, _next_unique_basename_count)
+
+
# Decimal SI magnitude factors, largest first, with their suffixes.
si_units = [
    (1000 ** 5, 'P'),
    (1000 ** 4, 'T'),
    (1000 ** 3, 'G'),
    (1000 ** 2, 'M'),
    (1000 ** 1, 'K'),
    (1000 ** 0, ''),
    ]
def n_label(n):
  """(stolen from hurry.filesize)"""
  # Pick the largest factor not exceeding n; default to the last
  # (factor 1, empty suffix) when n is smaller than all of them.
  factor, suffix = si_units[-1]
  for candidate_factor, candidate_suffix in si_units:
    if n >= candidate_factor:
      factor, suffix = candidate_factor, candidate_suffix
      break
  amount = int(n/factor)
  if isinstance(suffix, tuple):
    # Entries may carry a (singular, plural) suffix pair.
    singular, multiple = suffix
    suffix = singular if amount == 1 else multiple
  return str(amount) + suffix
+
+
def split_arg_once(l_r, sep):
  """ Split string L_R at the first occurrence of separator SEP.

      Returns a (left, right) tuple where a missing or empty part is
      None; a None or empty L_R yields (None, None).  Splitting at most
      once keeps inputs containing SEP more than once (e.g. a label
      with an embedded '@') from raising ValueError, as the original
      unlimited split() did. """
  if not l_r:
    return (None, None)
  if sep in l_r:
    l, r = l_r.split(sep, 1)
  else:
    l = l_r
    r = None
  if not l:
    l = None
  if not r:
    r = None
  return (l, r)
+
# The separators of a 'B@R,LxS' timings selection, in textual order.
RUN_KIND_SEPARATORS=('@', ',', 'x')

class RunKind:
  """ Parsed form of one 'B@R,LxS' timings selection string. """

  def __init__(self, b_r_l_s):
    sep_at, sep_comma, sep_x = RUN_KIND_SEPARATORS
    b_r, l_s = split_arg_once(b_r_l_s, sep_comma)
    self.branch, self.revision = split_arg_once(b_r, sep_at)
    self.levels, self.spread = split_arg_once(l_s, sep_x)
    if self.levels:
      self.levels = int(self.levels)
    if self.spread:
      self.spread = int(self.spread)

  def label(self):
    """ Reassemble a human-readable 'B@R,LxS' string from the parts
        that are actually set. """
    parts = []
    if self.branch:
      parts.append(self.branch)
    if self.revision:
      parts.append(RUN_KIND_SEPARATORS[0])
      parts.append(self.revision)
    if self.levels or self.spread:
      parts.append(RUN_KIND_SEPARATORS[1])
      if self.levels:
        parts.append(str(self.levels))
      if self.spread:
        parts.append(RUN_KIND_SEPARATORS[2])
        parts.append(str(self.spread))
    return ''.join(parts)

  def args(self):
    """ The four fields as a tuple, e.g. for use as SQL parameters. """
    return (self.branch, self.revision, self.levels, self.spread)
+
+
def parse_timings_selections(db, *args):
  """ Expand each timings-selection string in ARGS into RunKind objects.

      The special revision keywords 'each' and 'last[N]' (N defaults
      to 10) are expanded into one RunKind per matching revision found
      in DB; plain selections are passed through unchanged. """
  run_kinds = []

  for arg in args:
    kind = RunKind(arg)
    revision = kind.revision

    if revision == 'each':
      # One RunKind per revision on record.
      kind.revision = None
      for rev in TimingQuery(db, kind).get_sorted_revisions():
        expanded = copy(kind)
        expanded.revision = rev
        run_kinds.append(expanded)
    elif revision and revision.startswith('last'):
      # 'last' or 'lastN': the N most recent revisions on record.
      count_str = revision[4:]
      count = int(count_str) if count_str else 10
      kind.revision = None
      for rev in TimingQuery(db, kind).get_sorted_revisions()[-count:]:
        expanded = copy(kind)
        expanded.revision = rev
        run_kinds.append(expanded)
    else:
      run_kinds.append(kind)

  return run_kinds
+
def parse_one_timing_selection(db, *args):
  """ Like parse_timings_selections(), but insist that the arguments
      expand to exactly one RunKind; bail out otherwise. """
  run_kinds = parse_timings_selections(db, *args)
  if len(run_kinds) != 1:
    # Join the argument tuple itself; the original wrote
    # ' '.join(*args), which unpacked the tuple and raised TypeError
    # whenever this error path was reached with != 1 argument.
    bail("I need exactly one timings identifier, not '%s'"
         % (' '.join(args)))
  return run_kinds[0]
+
+
+
+
# Characters that are safe to keep when deriving a file name.
PATHNAME_VALID_CHARS = "-_.,@%s%s" % (string.ascii_letters, string.digits)
def filesystem_safe_string(s):
  """ Strip every character from S that is not filesystem-safe. """
  kept = [c for c in s if c in PATHNAME_VALID_CHARS]
  return ''.join(kept)
+
def do_div(ref, val):
  """ VAL as a fraction of REF; 0.0 when REF is zero (or false-ish). """
  return float(val) / float(ref) if ref else 0.0
+
def do_diff(ref, val):
  """ Difference VAL - REF, computed in float. """
  difference = float(val) - float(ref)
  return difference
+
+
+# ------------------------- database -------------------------
+
class TimingsDb:
  """ Thin wrapper around the sqlite timings database; creates the
      schema on first use. """

  def __init__(self, db_path):
    self.db_path = db_path
    self.conn = sqlite3.connect(db_path)
    self.ensure_tables_created()

  def ensure_tables_created(self):
    """ Create all tables unless the 'batch' table already exists. """
    c = self.conn.cursor()

    c.execute("""SELECT name FROM sqlite_master WHERE type='table' AND
                 name='batch'""")
    already_created = c.fetchone()
    if already_created:
      return

    print('Creating database tables.')
    c.executescript('''
        CREATE TABLE batch (
          batch_id INTEGER PRIMARY KEY AUTOINCREMENT,
          started TEXT,
          ended TEXT
        );

        CREATE TABLE run_kind (
          run_kind_id INTEGER PRIMARY KEY AUTOINCREMENT,
          branch TEXT NOT NULL,
          revision TEXT NOT NULL,
          wc_levels INTEGER,
          wc_spread INTEGER,
          UNIQUE(branch, revision, wc_levels, wc_spread)
        );

        CREATE TABLE run (
          run_id INTEGER PRIMARY KEY AUTOINCREMENT,
          batch_id INTEGER NOT NULL REFERENCES batch(batch_id),
          run_kind_id INTEGER NOT NULL REFERENCES run_kind(run_kind_id),
          started TEXT,
          ended TEXT,
          aborted INTEGER
        );

        CREATE TABLE timings (
          run_id INTEGER NOT NULL REFERENCES run(run_id),
          command TEXT NOT NULL,
          sequence INTEGER,
          timing REAL
        );'''
      )
    self.conn.commit()
    c.close()
+
+
class Batch:
  """ One invocation of this script; groups the runs it performs. """

  def __init__(self, db):
    self.db = db
    self.started = time_str()
    cursor = db.conn.cursor()
    cursor.execute("INSERT INTO batch (started) values (?)", (self.started,))
    db.conn.commit()
    # The AUTOINCREMENT id of the freshly inserted batch row.
    self.id = cursor.lastrowid
    cursor.close()

  def done(self):
    """ Record the end time of this batch in the database. """
    connection = self.db.conn
    cursor = connection.cursor()
    cursor.execute("""
        UPDATE batch
        SET ended = ?
        WHERE batch_id = ?""",
        (time_str(), self.id))
    connection.commit()
    cursor.close()
+
class Run:
  """ A single benchmark run inside a Batch.

      On construction, looks up (or creates) the matching run_kind row
      and inserts a run row.  Per-command wall-clock timings are then
      collected via tic()/toc() and written out by submit_timings(). """

  def __init__(self, batch, run_kind):
    self.batch = batch
    conn = self.batch.db.conn
    c = conn.cursor()

    # Find the run_kind row for (branch, revision, levels, spread) ...
    c.execute("""
      SELECT run_kind_id FROM run_kind
      WHERE branch = ?
        AND revision = ?
        AND wc_levels = ?
        AND wc_spread = ?""",
      run_kind.args())
    kind_ids = c.fetchone()
    if kind_ids:
      kind_id = kind_ids[0]
    else:
      # ... or create it on first use.
      c.execute("""
        INSERT INTO run_kind (branch, revision, wc_levels, wc_spread)
        VALUES (?, ?, ?, ?)""",
        run_kind.args())
      conn.commit()
      kind_id = c.lastrowid

    self.started = time_str()

    c.execute("""
      INSERT INTO run
       (batch_id, run_kind_id, started)
      VALUES
       (?, ?, ?)""",
      (self.batch.id, kind_id, self.started))
    conn.commit()
    self.id = c.lastrowid
    c.close();
    # Timing state: command currently being timed and its start time.
    self.tic_at = None
    self.current_command = None
    # List of (command, seconds) pairs collected so far.
    self.timings = []

  def tic(self, command):
    """ Start timing COMMAND; implicitly finishes any previous timing.
        Commands listed in IGNORE_COMMANDS are not timed. """
    if command in IGNORE_COMMANDS:
      return
    self.toc()
    self.current_command = command
    self.tic_at = datetime.datetime.now()

  def toc(self):
    """ Finish the current timing, if any, and remember its duration. """
    if self.current_command and self.tic_at:
      toc_at = datetime.datetime.now()
      self.remember_timing(self.current_command,
                           timedelta_to_seconds(toc_at - self.tic_at))
      self.current_command = None
      self.tic_at = None

  def remember_timing(self, command, seconds):
    """ Append one (command, seconds) measurement to the in-memory list. """
    self.timings.append((command, seconds))

  def submit_timings(self):
    """ Write all collected timings to the database in one batch;
        'sequence' preserves the order the commands were timed in. """
    conn = self.batch.db.conn
    c = conn.cursor()
    print('submitting...')

    c.executemany("""
      INSERT INTO timings
       (run_id, command, sequence, timing)
      VALUES
       (?, ?, ?, ?)""",
      [(self.id, t[0], (i + 1), t[1]) for i,t in enumerate(self.timings)])

    conn.commit()
    c.close()

  def done(self, aborted=False):
    """ Mark this run as ended (and possibly ABORTED) in the database. """
    conn = self.batch.db.conn
    c = conn.cursor()
    c.execute("""
      UPDATE run
      SET ended = ?, aborted = ?
      WHERE run_id = ?""",
      (time_str(), aborted, self.id))
    conn.commit()
    c.close()
+
+
class TimingQuery:
  """ Query helper for the timings database.

      Collects WHERE constraints from a RunKind (branch, revision,
      levels, spread) and offers accessors for the distinct values and
      aggregated timings matching them. """

  def __init__(self, db, run_kind):
    # Keep a reference to the db: all query helpers below go through
    # it.  (The original helpers referenced a *global* 'db', which only
    # worked when the surrounding script happened to define one.)
    self.db = db
    self.cursor = db.conn.cursor()
    self.constraints = []
    self.values = []
    self.timings = None
    # Core join of all four tables; aborted runs never count.
    self.FROM_WHERE = """
         FROM batch AS b,
              timings AS t,
              run AS r,
              run_kind as k
         WHERE
              t.run_id = r.run_id
          AND k.run_kind_id = r.run_kind_id
          AND b.batch_id = r.batch_id
          AND r.aborted = 0
         """
    self.append_constraint('k.branch', run_kind.branch)
    self.each_revision = False
    if run_kind.revision == 'each':
      self.each_revision = True
    else:
      self.append_constraint('k.revision', run_kind.revision)
    self.append_constraint('k.wc_levels', run_kind.levels)
    self.append_constraint('k.wc_spread', run_kind.spread)
    self.label = run_kind.label()

  def append_constraint(self, column_name, val):
    """ Add 'AND COLUMN_NAME = VAL'; a false-ish VAL adds nothing. """
    if val:
      self.constraints.append('AND %s = ?' % column_name)
      self.values.append(val)

  def remove_last_constraint(self):
    """ Undo the most recent append_constraint(). """
    del self.constraints[-1]
    del self.values[-1]

  def get_sorted_X(self, x, n=1):
    """ Distinct sorted values of column expression X under the current
        constraints.  Returns bare values for N == 1, tuples otherwise. """
    query = ['SELECT DISTINCT %s' % x,
             self.FROM_WHERE ]
    query.extend(self.constraints)
    query.append('ORDER BY %s' % x)
    c = self.db.conn.cursor()
    try:
      c.execute(' '.join(query), self.values)
      if n == 1:
        return [tpl[0] for tpl in c.fetchall()]
      else:
        return c.fetchall()
    finally:
      c.close()

  def get_sorted_command_names(self):
    return self.get_sorted_X('t.command')

  def get_sorted_branches(self):
    return self.get_sorted_X('k.branch')

  def get_sorted_revisions(self):
    return self.get_sorted_X('k.revision')

  def get_sorted_levels_spread(self):
    return self.get_sorted_X('k.wc_levels,k.wc_spread', n = 2)

  def count_runs_batches(self):
    """ Return the (number of runs, number of batches) tuple matching
        the current constraints. """
    query = ["""SELECT
                  count(DISTINCT r.run_id),
                  count(DISTINCT b.batch_id)""",
             self.FROM_WHERE ]
    query.extend(self.constraints)
    c = self.db.conn.cursor()
    try:
      c.execute(' '.join(query), self.values)
      return c.fetchone()
    finally:
      c.close()

  def get_command_timings(self, command):
    """ Return (count, min, max, avg) of the timings for COMMAND. """
    query = ["""SELECT
                  count(t.timing),
                  min(t.timing),
                  max(t.timing),
                  avg(t.timing)""",
             self.FROM_WHERE ]
    # Temporarily narrow the query to this one command.
    self.append_constraint('t.command', command)
    try:
      query.extend(self.constraints)
      c = self.db.conn.cursor()
      try:
        c.execute(' '.join(query), self.values)
        return c.fetchone()
      finally:
        c.close()
    finally:
      self.remove_last_constraint()

  def get_timings(self):
    """ Return {command: (count, min, max, avg)} for all matching
        commands; computed once, then cached. """
    if self.timings:
      return self.timings
    self.timings = {}
    for command_name in self.get_sorted_command_names():
      self.timings[command_name] = self.get_command_timings(command_name)
    return self.timings
+
+
+# ------------------------------------------------------------ run tests
+
+
+def perform_run(batch, run_kind,
+                svn_bin, svnadmin_bin, verbose):  # one complete benchmark scenario; returns True iff aborted
+
+  run = Run(batch, run_kind)
+
+  def create_tree(in_dir, _levels, _spread):  # populate in_dir with _spread files and subdirs per level
+    try:
+      os.mkdir(in_dir)
+    except:
+      pass  # best-effort: the directory may already exist (bare except is deliberate here)
+
+    for i in range(_spread):
+      # files
+      fn = j(in_dir, next_unique_basename('file'))
+      f = open(fn, 'w')
+      f.write('This is %s\n' % fn)
+      f.close()
+
+      # dirs
+      if (_levels > 1):  # recurse until only files remain at the deepest level
+        dn = j(in_dir, next_unique_basename('dir'))
+        create_tree(dn, _levels - 1, _spread)
+
+  def svn(*args):  # run `svn <args>` under timing; returns (stdout, stderr)
+    name = args[0]  # the subcommand labels the timing record
+
+    cmd = [ svn_bin ]
+    cmd.extend( list(args) )
+    if verbose:
+      print('svn cmd:', ' '.join(cmd))
+
+    stdin = None  # placeholder: no command currently feeds stdin, so the branch below is inert
+    if stdin:
+      stdin_arg = subprocess.PIPE
+    else:
+      stdin_arg = None
+
+    run.tic(name)
+    try:
+      p = subprocess.Popen(cmd,
+                           stdin=stdin_arg,
+                           stdout=subprocess.PIPE,
+                           stderr=subprocess.PIPE,
+                           shell=False)
+      stdout,stderr = p.communicate(input=stdin)
+    except OSError:  # e.g. svn binary not found; the timing is still recorded
+      stdout = stderr = None
+    finally:
+      run.toc()
+
+    if verbose:
+      if (stdout):
+        print("STDOUT: [[[\n%s]]]" % ''.join(stdout))
+      if (stderr):
+        print("STDERR: [[[\n%s]]]" % ''.join(stderr))
+
+    return stdout,stderr
+
+
+  def add(*args):
+    return svn('add', *args)
+
+  def ci(*args):
+    return svn('commit', '-mm', *args)
+
+  def up(*args):
+    return svn('update', *args)
+
+  def st(*args):
+    return svn('status', *args)
+
+  def info(*args):
+    return svn('info', *args)
+
+  _chars = [chr(x) for x in range(ord('a'), ord('z') +1)]  # lowercase alphabet for random names
+
+  def randstr(len=8):  # NOTE(review): parameter shadows builtin len()
+    return ''.join( [random.choice(_chars) for i in range(len)] )
+
+  def _copy(path):
+    dest = next_unique_basename(path + '_copied')
+    svn('copy', path, dest)
+
+  def _move(path):
+    dest = path + '_moved'
+    svn('move', path, dest)
+
+  def _propmod(path):  # modify and/or delete an existing property on path
+    so, se = svn('proplist', path)
+    propnames = [line.strip() for line in so.strip().split('\n')[1:]]  # [1:] skips the "Properties on ..." header
+
+    # modify?
+    if len(propnames):
+      svn('ps', propnames[len(propnames) / 2], randstr(), path)  # Python-2 integer division picks the middle property
+
+    # del?
+    if len(propnames) > 1:
+      svn('propdel', propnames[len(propnames) / 2], path)
+
+  def _propadd(path):
+    # set a new one.
+    svn('propset', randstr(), randstr(), path)
+
+  def _mod(path):  # text append for files, property tweak for dirs
+    if os.path.isdir(path):
+      _propmod(path)
+      return
+
+    f = open(path, 'a')
+    f.write('\n%s\n' % randstr())
+    f.close()
+
+  def _add(path):  # add a new child below path (or just modify, if path is a file)
+    if os.path.isfile(path):
+      return _mod(path)
+
+    if random.choice((True, False)):
+      # create a dir
+      svn('mkdir', j(path, next_unique_basename('new_dir')))
+    else:
+      # create a file
+      new_path = j(path, next_unique_basename('new_file'))
+      f = open(new_path, 'w')
+      f.write(randstr())
+      f.close()
+      svn('add', new_path)
+
+  def _del(path):
+    svn('delete', path)
+
+  _mod_funcs = (_mod, _add, _propmod, _propadd, )#_copy,) # _move, _del)
+
+  def modify_tree(in_dir, fraction):  # randomly modify roughly `fraction` of the tree's entries
+    child_names = os.listdir(in_dir)
+    for child_name in child_names:
+      if child_name[0] == '.':  # skip administrative entries such as .svn
+        continue
+      if random.random() < fraction:
+        path = j(in_dir, child_name)
+        random.choice(_mod_funcs)(path)
+
+    for child_name in child_names:
+      if child_name[0] == '.': continue
+      path = j(in_dir, child_name)
+      if os.path.isdir(path):
+        modify_tree(path, fraction)
+
+  def propadd_tree(in_dir, fraction):  # add a random property to roughly `fraction` of all entries
+    for child_name in os.listdir(in_dir):
+      if child_name[0] == '.': continue
+      path = j(in_dir, child_name)
+      if random.random() < fraction:
+        _propadd(path)
+      if os.path.isdir(path):
+        propadd_tree(path, fraction)
+
+
+  def rmtree_onerror(func, path, exc_info):
+    """Error handler for ``shutil.rmtree``.
+
+    If the error is due to an access error (read only file)
+    it attempts to add write permission and then retries.
+
+    If the error is for another reason it re-raises the error.
+
+    Usage : ``shutil.rmtree(path, onerror=rmtree_onerror)``
+    """
+    if not os.access(path, os.W_OK):
+      # Is the error an access error ?
+      os.chmod(path, stat.S_IWUSR)
+      func(path)
+    else:
+      raise  # re-raise the exception currently being handled
+
+  base = tempfile.mkdtemp()
+
+  # ensure identical modifications for every run
+  random.seed(0)
+
+  aborted = True  # pessimistic default; cleared only after the whole scenario completes
+
+  try:
+    repos = j(base, 'repos')
+    repos = repos.replace('\\', '/')  # file:// URLs always use forward slashes
+    wc = j(base, 'wc')
+    wc2 = j(base, 'wc2')
+
+    if repos.startswith('/'):
+      file_url = 'file://%s' % repos
+    else:
+      file_url = 'file:///%s' % repos  # e.g. a Windows drive path needs the extra slash
+
+    print('\nRunning svn benchmark in', base)
+    print('dir levels: %s; new files and dirs per leaf: %s' %(
+          run_kind.levels, run_kind.spread))
+
+    started = datetime.datetime.now()
+
+    try:
+      run_cmd([svnadmin_bin, 'create', repos])
+      svn('checkout', file_url, wc)
+
+      trunk = j(wc, 'trunk')
+      create_tree(trunk, run_kind.levels, run_kind.spread)
+      add(trunk)
+      st(wc)
+      ci(wc)
+      up(wc)
+      propadd_tree(trunk, 0.05)
+      ci(wc)
+      up(wc)
+      st(wc)
+      info('-R', wc)
+
+      trunk_url = file_url + '/trunk'
+      branch_url = file_url + '/branch'
+
+      svn('copy', '-mm', trunk_url, branch_url)  # cheap URL-to-URL branch
+      st(wc)
+
+      up(wc)
+      st(wc)
+      info('-R', wc)
+
+      svn('checkout', trunk_url, wc2)
+      st(wc2)
+      modify_tree(wc2, 0.5)
+      st(wc2)
+      ci(wc2)
+      up(wc2)
+      up(wc)
+
+      svn('switch', branch_url, wc2)  # wc2 now tracks the branch
+      modify_tree(wc2, 0.5)
+      st(wc2)
+      info('-R', wc2)
+      ci(wc2)
+      up(wc2)
+      up(wc)
+
+      modify_tree(trunk, 0.5)
+      st(wc)
+      ci(wc)
+      up(wc2)
+      up(wc)
+
+      svn('merge', '--accept=postpone', trunk_url, wc2)  # sync trunk into the branch wc
+      st(wc2)
+      info('-R', wc2)
+      svn('resolve', '--accept=mine-conflict', wc2)
+      st(wc2)
+      svn('resolved', '-R', wc2)
+      st(wc2)
+      info('-R', wc2)
+      ci(wc2)
+      up(wc2)
+      up(wc)
+
+      svn('merge', '--accept=postpone', '--reintegrate', branch_url, trunk)  # merge the branch back
+      st(wc)
+      svn('resolve', '--accept=mine-conflict', wc)
+      st(wc)
+      svn('resolved', '-R', wc)
+      st(wc)
+      ci(wc)
+      up(wc2)
+      up(wc)
+
+      svn('delete', j(wc, 'branch'))
+      ci(wc)
+      up(wc)
+
+      aborted = False  # reached only if every step above succeeded
+
+    finally:
+      stopped = datetime.datetime.now()
+      print('\nDone with svn benchmark in', (stopped - started))
+
+      run.remember_timing(TOTAL_RUN,
+                          timedelta_to_seconds(stopped - started))
+  finally:
+    run.done(aborted)
+    run.submit_timings()
+    shutil.rmtree(base, onerror=rmtree_onerror)  # clean up even on failure
+
+  return aborted
+
+
+# ---------------------------------------------------------------------
+
+
+def cmdline_run(db, options, run_kind_str, N=1):  # `run` subcommand: execute N benchmark runs for one selection
+  run_kind = parse_one_timing_selection(db, run_kind_str)
+
+  N = int(N)  # may arrive as a string from the command line
+
+  print('Hi, going to run a Subversion benchmark series of %d runs...' % N)
+  print('Label is %s' % run_kind.label())
+
+  # can we run the svn binaries?
+  svn_bin = j(options.svn_bin_dir, 'svn')
+  svnadmin_bin = j(options.svn_bin_dir, 'svnadmin')
+
+  for b in (svn_bin, svnadmin_bin):
+    so,se = run_cmd([b, '--version'])
+    if not so:  # no stdout at all means the binary could not be executed
+      bail("Can't run %s" % b)
+
+    print(', '.join([s.strip() for s in so.split('\n')[:2]]))
+
+  batch = Batch(db)
+
+  for i in range(N):
+    print('Run %d of %d' % (i + 1, N))
+    perform_run(batch, run_kind,
+                svn_bin, svnadmin_bin, options.verbose)
+
+  batch.done()
+
+
+def cmdline_list(db, options, *args):  # `list` subcommand: summarize what timings are on record
+  run_kinds = parse_timings_selections(db, *args)
+
+  for run_kind in run_kinds:
+
+    constraints = []
+    def add_if_not_none(name, val):  # collect human-readable constraint lines for the header
+      if val:
+        constraints.append(' %s = %s' % (name, val))
+    add_if_not_none('branch', run_kind.branch)
+    add_if_not_none('revision', run_kind.revision)
+    add_if_not_none('levels', run_kind.levels)
+    add_if_not_none('spread', run_kind.spread)
+    if constraints:
+      print('For\n', '\n'.join(constraints))
+    print('I found:')
+
+    d = TimingQuery(db, run_kind)
+
+    cmd_names = d.get_sorted_command_names()
+    if cmd_names:
+      print('\n%d command names:\n ' % len(cmd_names), '\n '.join(cmd_names))
+
+    branches = d.get_sorted_branches()
+    if branches and (len(branches) > 1 or branches[0] != run_kind.branch):  # only print when it adds information
+      print('\n%d branches:\n ' % len(branches), '\n '.join(branches))
+
+    revisions = d.get_sorted_revisions()
+    if revisions and (len(revisions) > 1 or revisions[0] != run_kind.revision):
+      print('\n%d revisions:\n ' % len(revisions), '\n '.join(revisions))
+
+    levels_spread = d.get_sorted_levels_spread()
+    if levels_spread and (
+         len(levels_spread) > 1
+         or levels_spread[0] != (run_kind.levels, run_kind.spread)):
+      print('\n%d kinds of levels x spread:\n ' % len(levels_spread), '\n '.join(
+              [ ('%dx%d' % (l, s)) for l,s in levels_spread ]))
+
+    print("\n%d runs in %d batches.\n" % (d.count_runs_batches()))
+
+
+def cmdline_show(db, options, *run_kind_strings):  # `show` subcommand: print a timings table per selection
+  run_kinds = parse_timings_selections(db, *run_kind_strings)
+  for run_kind in run_kinds:
+    q = TimingQuery(db, run_kind)
+    timings = q.get_timings()
+
+    s = []
+    s.append('Timings for %s' % run_kind.label())
+    s.append('    N    min     max     avg   operation  (unit is seconds)')
+
+    for command_name in q.get_sorted_command_names():
+      if options.command_names and command_name not in options.command_names:
+        continue  # NOTE(review): command_names is a comma-separated *string*, so this is a substring test -- confirm intended
+      n, tmin, tmax, tavg = timings[command_name]
+
+      s.append('%4s %7.2f %7.2f %7.2f %s' % (
+        n_label(n),
+        tmin,
+        tmax,
+        tavg,
+        command_name))
+
+    print('\n'.join(s))
+
+
+def cmdline_compare(db, options, *args):  # `compare` subcommand: compare each further selection against the first
+  run_kinds = parse_timings_selections(db, *args)
+  if len(run_kinds) < 2:
+    bail("Need at least two sets of timings to compare.")
+
+
+  left_kind = run_kinds[0]  # the baseline side
+  leftq = TimingQuery(db, left_kind)
+  left = leftq.get_timings()
+  if not left:
+    bail("No timings for %s" % left_kind.label())
+
+  for run_kind_idx in range(1, len(run_kinds)):
+    right_kind = run_kinds[run_kind_idx]
+
+    rightq = TimingQuery(db, right_kind)
+    right = rightq.get_timings()
+    if not right:
+      print("No timings for %s" % right_kind.label())
+      continue  # missing data is not fatal; just skip this comparison
+
+    label = 'Compare %s to %s' % (right_kind.label(), left_kind.label())
+
+    s = [label]
+
+    verbose = options.verbose
+    if not verbose:
+      s.append('       N        avg         operation')
+    else:
+      s.append('       N        min              max              avg         operation')
+
+    command_names = [name for name in leftq.get_sorted_command_names()
+                     if name in right]  # only commands timed on both sides
+    if options.command_names:
+      command_names = [name for name in command_names
+                       if name in options.command_names]
+
+    for command_name in command_names:
+      left_N, left_min, left_max, left_avg = left[command_name]
+      right_N, right_min, right_max, right_avg = right[command_name]
+
+      N_str = '%s/%s' % (n_label(left_N), n_label(right_N))  # "left/right" run counts
+      avg_str = '%7.2f|%+7.3f' % (do_div(left_avg, right_avg),
+                                  do_diff(left_avg, right_avg))
+
+      if not verbose:
+        s.append('%9s %-16s %s' % (N_str, avg_str, command_name))
+      else:
+        min_str = '%7.2f|%+7.3f' % (do_div(left_min, right_min),
+                                    do_diff(left_min, right_min))
+        max_str = '%7.2f|%+7.3f' % (do_div(left_max, right_max),
+                                    do_diff(left_max, right_max))
+
+        s.append('%9s %-16s %-16s %-16s %s' % (N_str, min_str, max_str, avg_str,
+                                               command_name))
+
+    s.extend([
+      '(legend: "1.23|+0.45" means: slower by factor 1.23 and by 0.45 seconds;',
+      ' factor < 1 and seconds < 0 means \'%s\' is faster.'
+      % right_kind.label(),
+      ' "2/3" means: \'%s\' has 2 timings on record, the other has 3.)'
+      % left_kind.label()
+    ])
+
+
+    print('\n'.join(s))
+
+
+# ------------------------------------------------------- charts
+
+def cmdline_chart_compare(db, options, *args):  # `chart compare`: render a comparison bar chart to an SVG file
+  import matplotlib
+  matplotlib.use('Agg')  # headless backend: render straight to file, no display needed
+  import numpy as np
+  import matplotlib.pylab as plt
+
+  labels = []
+  timing_sets = []
+  command_names = None
+
+  run_kinds = parse_timings_selections(db, *args)
+
+  # iterate the timings selections and accumulate data
+  for run_kind in run_kinds:
+    query = TimingQuery(db, run_kind)
+    timings = query.get_timings()
+    if not timings:
+      print("No timings for %s" % run_kind.label())
+      continue
+    labels.append(run_kind.label())
+    timing_sets.append(timings)
+
+    # it only makes sense to compare those commands that have timings
+    # in the first selection, because that is the one everything else
+    # is compared to. Remember the first selection's command names.
+    if not command_names:
+      command_names = query.get_sorted_command_names()
+
+
+  if len(timing_sets) < 2:
+    bail("Not enough timings")
+
+  if options.command_names:
+    command_names = [name for name in command_names
+                     if name in options.command_names]
+
+  chart_path = options.chart_path
+  if not chart_path:  # derive a default filename from the selection labels
+    chart_path = 'compare_' + '_'.join(
+      [ filesystem_safe_string(l) for l in labels ]
+      ) + '.svg'
+
+  N = len(command_names)
+  M = len(timing_sets) - 1
+  if M < 2:
+    M = 2  # presumably a layout lower bound for bar sizing -- confirm
+
+  group_positions = np.arange(N)  # the y locations for the groups
+  dist = 1. / (1. + M)
+  height = (1. - dist) / M        # the height of the bars
+
+  fig = plt.figure(figsize=(12, 5 + 0.2*N*M))
+  plot1 = fig.add_subplot(121)
+  plot2 = fig.add_subplot(122)
+
+  left = timing_sets[0]  # baseline
+
+  # Iterate timing sets. Each loop produces one bar for each command name
+  # group.
+  for label_i,label in enumerate(labels[1:],1):
+    right = timing_sets[label_i]
+    if not right:
+      continue
+
+    for cmd_i, command_name in enumerate(command_names):
+      if command_name not in right:
+        #skip
+        continue
+
+      left_N, left_min, left_max, left_avg = left[command_name]
+      right_N, right_min, right_max, right_avg = right[command_name]
+
+      div_avg = 100. * (do_div(left_avg, right_avg) - 1.0)  # percent slower (+) / faster (-)
+      if div_avg <= 0:
+        col = '#55dd55'
+      else:
+        col = '#dd5555'
+
+      diff_val = do_diff(left_avg, right_avg)
+
+      ofs = (dist + height) / 2. + height * (label_i - 1)
+
+      barheight = height * (1.0 - dist)
+
+      y = float(cmd_i) + ofs
+
+      plot1.barh((y, ),
+                 (div_avg, ),
+                 barheight,
+                 color=col, edgecolor='white')
+      plot1.text(0., y + height/2.,
+                 '%s %+5.1f%%' % (label, div_avg),
+                 ha='right', va='center', size='small',
+                 rotation=0, family='monospace')
+
+      plot2.barh((y, ),
+                 (diff_val, ),
+                 barheight,
+                 color=col, edgecolor='white')
+      plot2.text(0., y + height/2.,
+                 '%s %+6.2fs' % (label, diff_val),
+                 ha='right', va='center', size='small',
+                 rotation=0, family='monospace')
+
+
+  for p in (plot1, plot2):
+    xlim = list(p.get_xlim())
+    if xlim[1] < 10.:
+      xlim[1] = 10.
+    # make sure the zero line is far enough right so that the annotations
+    # fit inside the chart. About half the width should suffice.
+    if xlim[0] > -xlim[1]:
+      xlim[0] = -xlim[1]
+    p.set_xlim(*xlim)
+    p.set_xticks((0,))
+    p.set_yticks(group_positions + (height / 2.))
+    p.set_yticklabels(())
+    p.set_ylim((len(command_names), 0))
+    p.grid()
+
+  plot1.set_xticklabels(('+-0%',), rotation=0)
+  plot1.set_title('Average runtime change from %s in %%' % labels[0],
+                  size='medium')
+
+  plot2.set_xticklabels(('+-0s',), rotation=0)
+  plot2.set_title('Average runtime change from %s in seconds' % labels[0],
+                  size='medium')
+
+  margin = 1./(2 + N*M)
+  titlemargin = 0
+  if options.title:
+    titlemargin = margin * 1.5
+
+  fig.subplots_adjust(left=0.005, right=0.995, wspace=0.3, bottom=margin,
+                      top=1.0-margin-titlemargin)
+
+  ystep = (1.0 - 2.*margin - titlemargin) / len(command_names)
+
+  for idx,command_name in enumerate(command_names):
+    ylabel = '%s\nvs. %.1fs' % (  # NOTE(review): ylabel is built but never used
+               command_name,
+               left[command_name][3])
+
+    ypos=1.0 - margin - titlemargin - ystep/M - ystep * idx
+    plt.figtext(0.5, ypos,
+                command_name,
+                ha='center', va='top',
+                size='medium', weight='bold')
+    plt.figtext(0.5, ypos - ystep/(M+1),
+                '%s\n= %.2fs' % (
+                  labels[0], left[command_name][3]),
+                ha='center', va='top',
+                size='small')
+
+  if options.title:
+    plt.figtext(0.5, 1. - titlemargin/2, options.title, ha='center',
+                va='center', weight='bold')
+
+  plt.savefig(chart_path)
+  print('wrote chart file:', chart_path)
+
+
+# ------------------------------------------------------------ main
+
+
+# Custom option formatter, keeping newlines in the description.
+# adapted from:
+# http://groups.google.com/group/comp.lang.python/msg/09f28e26af0699b1
+import textwrap
+class IndentedHelpFormatterWithNL(optparse.IndentedHelpFormatter):  # help formatter that keeps explicit newlines
+  def format_description(self, description):
+    if not description: return ""
+    desc_width = self.width - self.current_indent
+    indent = " "*self.current_indent
+    bits = description.split('\n')  # wrap each line separately so '\n' in the docstring survives
+    formatted_bits = [
+      textwrap.fill(bit,
+        desc_width,
+        initial_indent=indent,
+        subsequent_indent=indent)
+      for bit in bits]
+    result = "\n".join(formatted_bits) + "\n"
+    return result
+
+if __name__ == '__main__':
+  parser = optparse.OptionParser(formatter=IndentedHelpFormatterWithNL())
+  # -h is automatically added.
+  ### should probably expand the help for that. and see about -?
+  parser.add_option('-v', '--verbose', action='store_true', dest='verbose',
+                    help='Verbose operation')
+  parser.add_option('-b', '--svn-bin-dir', action='store', dest='svn_bin_dir',
+                    default='',
+                    help='Specify directory to find Subversion binaries in')
+  parser.add_option('-f', '--db-path', action='store', dest='db_path',
+                    default='benchmark.db',
+                    help='Specify path to SQLite database file')
+  parser.add_option('-o', '--chart-path', action='store', dest='chart_path',
+                    help='Supply a path for chart output.')
+  parser.add_option('-c', '--command-names', action='store',
+                    dest='command_names',
+                    help='Comma separated list of command names to limit to.')
+  parser.add_option('-t', '--title', action='store',
+                    dest='title',
+                    help='For charts, a title to print in the chart graphics.')
+
+  parser.set_description(__doc__)
+  parser.set_usage('')
+
+
+  options, args = parser.parse_args()
+
+  def usage(msg=None):  # print help (and an optional message), then exit via bail()
+    parser.print_help()
+    if msg:
+      print("")
+      print(msg)
+    bail()
+
+  # there should be at least one arg left: the sub-command
+  if not args:
+    usage('No command argument supplied.')
+
+  cmd = args[0]
+  del args[0]
+
+  db = TimingsDb(options.db_path)  # module-level: the TimingQuery methods reference this global
+
+  if cmd == 'run':
+    if len(args) < 1 or len(args) > 2:
+      usage()
+    cmdline_run(db, options, *args)
+
+  elif cmd == 'compare':
+    if len(args) < 2:
+      usage()
+    cmdline_compare(db, options, *args)
+
+  elif cmd == 'list':
+    cmdline_list(db, options, *args)
+
+  elif cmd == 'show':
+    cmdline_show(db, options, *args)
+
+  elif cmd == 'chart':
+    if 'compare'.startswith(args[0]):  # accepts any prefix of "compare"; NOTE(review): IndexError if no sub-arg given -- confirm
+      cmdline_chart_compare(db, options, *args[1:])
+    else:
+      usage()
+
+  else:
+    usage('Unknown subcommand argument: %s' % cmd)
diff --git a/tools/dev/benchmarks/suite1/cronjob b/tools/dev/benchmarks/suite1/cronjob
new file mode 100755
index 0000000..5b74292
--- /dev/null
+++ b/tools/dev/benchmarks/suite1/cronjob
@@ -0,0 +1,102 @@
+#!/bin/bash
+#
+# ====================================================================
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements. See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership. The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied. See the License for the
+# specific language governing permissions and limitations
+# under the License.
+# ====================================================================
+#
+# This is the cronjob as run on our ASF box aka svn-qavm.
+# It uses neels' mad bash script magic called 'pat' to update and
+# build the latest trunk, invokes a benchmark and sends as mail.
+#
+# A word on 'pat': this is a grown-out-of-proportions bash script that holds
+# all the small and large tasks that I do while developing on Subversion.
+# While it works for me, it's not particularly beautifully coded --
+# wouldn't publish it in Subversion's trunk, but if you want to find out
+# what it does: http://hofmeyr.de/pat/
+
+#EMAILS=your@ema.il add@ress.es
+EMAILS=dev@subversion.apache.org
+
+echo
+echo "--------------------------------------------------------------------"
+date
+echo
+
+results="$(tempfile)"  # NOTE(review): tempfile(1) is Debian-specific and deprecated; mktemp is the portable choice
+
+benchdir=/home/neels/svnbench
+patbin=/home/neels/bin/pat
+patbase=/home/neels/pat
+
+
+# first update trunk to HEAD and rebuild.
+# update/build is logged to the cronjob log (via stdout)
+
+cd "$patbase/trunk"
+"$patbin" update
+
+if [ "$?" -ne "0" ]; then  # 'pat update' failed
+  subject="Failed to update to HEAD."
+  echo "$subject" > "$results"
+  echo "$subject"
+else
+
+  rev="$("$patbase"/stable/prefix/bin/svn info "$patbase"/trunk/src | grep Revision)"
+  if [ -z "$rev" ]; then  # no Revision line: working copy is broken
+    subject="Working copy problem."
+    echo "$subject" > "$results"
+    echo "$subject"
+  else
+
+    NONMAINTAINER=1 "$patbin" remake
+    if [ "$?" -ne "0" ]; then
+      subject="Failed to build $rev."
+      echo "$subject" > "$results"
+      echo "$subject"
+    else
+
+
+      # updating and building succeeded!
+      # run the benchmark:
+
+      compiled="$("$patbase"/trunk/prefix/bin/svn --version | grep "compiled")"
+      subject="$rev$compiled"
+
+      cd "$benchdir"
+
+      # make more or less sure that runs don't leak into each other via
+      # I/O caching.
+      sync
+
+      # basically, just run it. But also, I want to
+      # - append output to stdout, for cronjob logging.
+      # - send output as mail, but only this run's output less update&build
+      time -p ./run 2>&1 | tee "$results"
+      time -p ./generate_charts 2>&1 | tee -a "$results"
+    fi
+  fi
+fi
+
+if [ -n "$EMAILS" ]; then
+  cat "$results" | mail -s "[svnbench] $subject" $EMAILS  # $EMAILS deliberately unquoted: splits into recipients
+else
+  echo "No email addresses configured."
+fi
+
+rm "$results"
+
diff --git a/tools/dev/benchmarks/suite1/crontab.entry b/tools/dev/benchmarks/suite1/crontab.entry
new file mode 100644
index 0000000..23f7aa4
--- /dev/null
+++ b/tools/dev/benchmarks/suite1/crontab.entry
@@ -0,0 +1,5 @@
+# This invokes the benchmarking cronjob as run on our ASF box aka svn-qavm
+# (ask danielsh about the VM).
+# m h dom mon dow command
+21 0 * * Mon /home/neels/svnbench/cronjob >>/home/neels/cronjob.log 2>&1
+
diff --git a/tools/dev/benchmarks/suite1/generate_charts b/tools/dev/benchmarks/suite1/generate_charts
new file mode 100755
index 0000000..8e16526
--- /dev/null
+++ b/tools/dev/benchmarks/suite1/generate_charts
@@ -0,0 +1,60 @@
+#!/bin/sh
+
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements. See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership. The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied. See the License for the
+# specific language governing permissions and limitations
+# under the License.
+
+SVN_A_NAME="1.7.0"
+SVN_B_NAME="trunk"
+
+# benchmark script and parameters...
+benchmark="$PWD/benchmark.py"
+
+db="$PWD/${SVNBENCH_DEBUG}benchmark.db"
+
+chartsdir="$PWD/charts"
+
+mkdir -p "$chartsdir/.busy"  # charts render into .busy first, then move into place when complete
+
+if [ ! -e "$chartsdir/README" ]; then
+  cat > "$chartsdir/README" <<END
+These charts are generated by svn benchmark suite1.
+http://svn.apache.org/repos/asf/subversion/trunk/tools/dev/benchmarks/suite1
+
+*DISCLAIMER* - This tests only file://-URL access on a GNU/Linux VM.
+This is intended to measure changes in performance of the local working
+copy layer, *only*. These results are *not* generally true for everyone.
+END
+fi
+
+for levelspread in "" 5x5 1x100 100x1; do  # "" means: all levels/spreads combined
+  if [ -z "$levelspread" ]; then
+    lsarg=""
+    lstitle=""
+  else
+    lsarg=",$levelspread"
+    lstitle=", WC dir levels x spread = $levelspread"
+  fi
+  N=12  # compare against the last 12 trunk runs
+  "$benchmark" -f "$db" chart compare \
+    ${SVN_A_NAME}$lsarg ${SVN_B_NAME}@last${N}$lsarg \
+    -o "$chartsdir/.busy/compare_${SVN_A_NAME}_${SVN_B_NAME}@last${N}$lsarg.svg" \
+    -t "svn client benchmarks, file:// access *only*$lstitle"
+done
+
+mv "$chartsdir/.busy/"*.svg "$chartsdir/"
+rmdir "$chartsdir/.busy"
+
diff --git a/tools/dev/benchmarks/suite1/run b/tools/dev/benchmarks/suite1/run
new file mode 100755
index 0000000..c146ea0
--- /dev/null
+++ b/tools/dev/benchmarks/suite1/run
@@ -0,0 +1,145 @@
+#!/usr/bin/env bash
+
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements. See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership. The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied. See the License for the
+# specific language governing permissions and limitations
+# under the License.
+
+# debug? Just uncomment.
+#SVNBENCH_DEBUG=DEBUG_
+if [ -n "$SVNBENCH_DEBUG" ]; then
+  SVNBENCH_DEBUG="DEBUG_"  # normalize any non-empty value to the canonical prefix
+fi
+
+# Subversion bin-dir used for maintenance of working copies
+SVN_STABLE="$HOME/pat/stable/prefix/bin/"
+
+# Where to find the svn binaries you want to benchmark, what are their labels
+# and Last Changed Revisions?
+# side A
+SVN_A_NAME="1.7.0"
+SVN_A="$HOME/pat/bench/prefix/bin"
+SVN_A_REV="$("$SVN_STABLE"/svnversion -c "$HOME/pat/bench/src" | sed 's/.*://')"  # keep only the part after the colon
+
+# side B
+SVN_B_NAME="trunk"
+SVN_B="$HOME/pat/trunk/prefix/bin"
+SVN_B_REV="$("$SVN_STABLE"/svnversion -c "$HOME/pat/trunk/src" | sed 's/.*://')"
+
+echo "$SVN_A_NAME@$SVN_A_REV vs. $SVN_B_NAME@$SVN_B_REV"
+
+# benchmark script and parameters...
+benchmark="$PWD/benchmark.py"
+
+db="$PWD/${SVNBENCH_DEBUG}benchmark.db"
+
+batch(){  # one levels x spread configuration: bench side A once, side B $3 times
+  levels="$1"
+  spread="$2"
+  N="$3"
+
+  # SVN_A is a fixed tag, currently 1.7.0. For each call, run this once.
+  # It will be called again and again for each trunk build being tested,
+  # that's why we don't really need to run it $N times every time.
+  N_for_A=1
+  "$benchmark" "--db-path=$db" "--svn-bin-dir=$SVN_A" \
+    run "$SVN_A_NAME@$SVN_A_REV,${levels}x$spread" "$N_for_A" >/dev/null
+
+  # SVN_B is a branch, i.e. the moving target, benchmarked at a specific
+  # point in history each time this script is called. Run this $N times.
+  "$benchmark" "--db-path=$db" "--svn-bin-dir=$SVN_B" \
+    run "$SVN_B_NAME@$SVN_B_REV,${levels}x$spread" $N >/dev/null
+}
+
+N=3
+al=5    # levels x spread for configuration a (and b, c below)
+as=5
+bl=100
+bs=1
+cl=1
+cs=100
+
+if [ -n "$SVNBENCH_DEBUG" ]; then
+  echo "DEBUG"
+  N=1
+  al=1
+  as=1
+  bl=2
+  bs=1
+  cl=1
+  cs=2
+fi
+
+
+{  # everything inside this group is both shown and captured into results.txt
+started="$(date)"
+echo "Started at $started"
+
+echo "
+*DISCLAIMER* - This tests only file://-URL access on a GNU/Linux VM.
+This is intended to measure changes in performance of the local working
+copy layer, *only*. These results are *not* generally true for everyone.
+
+Charts of this data are available at http://svn-qavm.apache.org/charts/"
+
+if [ -z "$SVNBENCH_SUMMARY_ONLY" ]; then
+  batch $al $as $N
+  batch $bl $bs $N
+  batch $cl $cs $N
+else
+  echo "(not running benchmarks, just printing results on record.)"
+fi
+
+echo ""
+echo "Averaged-total results across all runs:"
+echo "---------------------------------------"
+echo ""
+"$benchmark" "--db-path=$db" \
+  compare "$SVN_A_NAME" "$SVN_B_NAME@$SVN_B_REV"
+
+echo ""
+echo ""
+echo "Above totals split into separate <dir-levels>x<dir-spread> runs:"
+echo "----------------------------------------------------------------"
+echo ""
+
+for lvlspr in "${al}x${as}" "${bl}x${bs}" "${cl}x${cs}"; do
+  "$benchmark" "--db-path=$db" \
+    compare "$SVN_A_NAME,$lvlspr" "$SVN_B_NAME@$SVN_B_REV,$lvlspr"
+  echo ""
+done
+
+echo ""
+echo ""
+echo "More detail:"
+echo "------------"
+echo ""
+
+for lvlspr in "${al}x${as}" "${bl}x${bs}" "${cl}x${cs}" "" ; do
+  "$benchmark" "--db-path=$db" show "$SVN_A_NAME,$lvlspr"
+  echo --
+  "$benchmark" "--db-path=$db" show "$SVN_B_NAME@$SVN_B_REV,$lvlspr"
+  echo --
+  "$benchmark" "--db-path=$db" \
+    compare -v "$SVN_A_NAME,$lvlspr" "$SVN_B_NAME@$SVN_B_REV,$lvlspr"
+  echo ""
+  echo ""
+done
+
+echo ""
+echo "Had started at $started,"
+echo "  done at $(date)"
+} 2>&1 | tee results.txt
+
diff --git a/tools/dev/benchmarks/suite1/run.bat b/tools/dev/benchmarks/suite1/run.bat
new file mode 100644
index 0000000..6d3d466
--- /dev/null
+++ b/tools/dev/benchmarks/suite1/run.bat
@@ -0,0 +1,105 @@
+:: Licensed to the Apache Software Foundation (ASF) under one
+:: or more contributor license agreements. See the NOTICE file
+:: distributed with this work for additional information
+:: regarding copyright ownership. The ASF licenses this file
+:: to you under the Apache License, Version 2.0 (the
+:: "License"); you may not use this file except in compliance
+:: with the License. You may obtain a copy of the License at
+::
+:: http://www.apache.org/licenses/LICENSE-2.0
+::
+:: Unless required by applicable law or agreed to in writing,
+:: software distributed under the License is distributed on an
+:: "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+:: KIND, either express or implied. See the License for the
+:: specific language governing permissions and limitations
+:: under the License.
+
+@ECHO OFF
+
+ECHO.THIS SCRIPT IS CURRENTLY OUTDATED.
+GOTO :EOF
+
+SETLOCAL EnableDelayedExpansion
+
+:: Where are the svn binaries you want to benchmark? Edit these two paths.
+SET SVN_1_6=C:\path\to\1.6-svn\bin\svn
+SET SVN_trunk=C:\path\to\trunk-svn\bin\svn
+
+SET benchmark=%CD%\benchmark.py
+
+SET my_datetime=%date%-%time%
+SET my_datetime=%my_datetime: =_%
+SET my_datetime=%my_datetime:/=_%
+SET my_datetime=%my_datetime::=%
+SET my_datetime=%my_datetime:.=%
+SET my_datetime=%my_datetime:,=%
+SET parent=%my_datetime%
+SET inital_workdir=%CD%
+mkdir "%parent%"
+cd "%parent%"
+ECHO %CD%
+
+GOTO main
+
+:batch
+  SET levels=%1
+  SET spread=%2
+  SET N=%3
+  SET pre=%levels%x%spread%_
+  ECHO.
+  ECHO.---------------------------------------------------------------------
+  ECHO.
+  ECHO.Results for dir levels: %levels%  spread: %spread%
+  CALL "%benchmark%" --svn="%SVN_1_6%" run %pre%1.6 %levels% %spread% %N% > NUL
+  CALL "%benchmark%" --svn="%SVN_trunk%" run %pre%trunk %levels% %spread% %N% > NUL
+  CALL "%benchmark%" compare %pre%1.6 %pre%trunk
+  GOTO :EOF
+
+:main
+SET N=6
+SET al=5
+SET as=5
+SET bl=25
+SET bs=1
+SET cl=1
+SET cs=100
+
+::::DEBUG -- uncomment the SET lines below for a quick smoke-test run
+::SET N=1
+::SET al=1
+::SET as=1
+::SET bl=2
+::SET bs=1
+::SET cl=1
+::SET cs=2
+::::DEBUG -- end of debug overrides
+
+SET started=%date%-%time%
+ECHO.Started at %started%
+ECHO.
+
+CALL :batch %al% %as% %N%
+CALL :batch %bl% %bs% %N%
+CALL :batch %cl% %cs% %N%
+
+ECHO.
+ECHO.=========================================================================
+ECHO.
+FOR %%F IN (*x*_1.6) DO SET all_1.6=!all_1.6! %%F
+CALL "%benchmark%" combine total_1.6 %all_1.6% > NUL
+FOR %%F IN (*x*_trunk) DO SET all_trunk=!all_trunk! %%F
+CALL "%benchmark%" combine total_trunk %all_trunk% > NUL
+
+ECHO.comparing averaged totals..."
+CALL "%benchmark%" compare total_1.6 total_trunk
+
+ECHO.
+ECHO.Had started at %started%,
+ECHO.          done at %date%-%time%
+ECHO %CD%
+
+cd "%inital_workdir%"
+IF EXIST %parent%\total_trunk rmdir /S /Q "%parent%"
+
+ENDLOCAL
diff --git a/tools/dev/build-svn-deps-win.pl b/tools/dev/build-svn-deps-win.pl
new file mode 100755
index 0000000..d936369
--- /dev/null
+++ b/tools/dev/build-svn-deps-win.pl
@@ -0,0 +1,919 @@
+# ====================================================================
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements. See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership. The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied. See the License for the
+# specific language governing permissions and limitations
+# under the License.
+# ====================================================================
+#
+# Script to build all the dependencies for Subversion on Windows
+# It's been written for Windows 8 and Visual Studio 2012, but
+# it's entirely possible it will work with older versions of both.
+
+# The goal here is not to necessarily have everyone using this script.
+# But rather to be able to produce binary packages of the dependencies
+# already built to allow developers to be able to download or checkout
+# Subversion and quickly get up a development environment.
+
+# Prerequisites:
+# Perl: http://www.activestate.com/activeperl/downloads
+# Python: http://www.activestate.com/activepython/downloads
+# 7-Zip: http://www.7-zip.org/download.html
+# CMake: http://www.cmake.org/cmake/resources/software.html
+# Microsoft Visual Studio 2012 (Ultimate has been tested, Express does not work)
+#
+# You probably want these on your PATH. The installers usually
+# offer an option to do that for you so if you can let them.
+#
+# You are expected to run this script within the correct Visual Studio
+# Shell. Probably "VS2012 x86 Native Tools Command Prompt". This
+# sets the proper PATH arguments so that the compiler tools are
+# available.
+#
+# TODO:
+# Find some way to work around the lack of devenv in Express (msbuild will help some)
+# Include a package target that zips everything up.
+#   Perl script that runs the Subversion gen-make.py tool with the right args.
+# Alternatively update gen-make.py with an arg that knows about our layout.
+# Make the Windows build not expect to go looking into source code (httpd/zlib)
+# Add SWIG (to support checkout builds where SWIG generation hasn't been done).
+# Usage/help output from the usual flags/on error input.
+# Make SQLITE_VER friendly since we're using no dots right now.
+# Work out the fixes to the projects' sources and contribute them back.
+# Allow selection of Arch (x86 and x64)
+# ZLib support for OpenSSL (have to patch openssl)
+# Use CMake zlib build instead.
+# Assembler support for OpenSSL.
+# Add more specific commands to the command line (e.g. build-httpd)
+
+###################################
+###### V A R I A B L E S ######
+###################################
+package Vars;
+# variables in the Vars package can be overridden from the command
+# line with the FOO=BAR syntax. If you want any defaults to reference
+# other variables the defaults need to be in set_defaults() below to
+# allow the defaults to be set after processing user set variables.
+
+# Paths to commands to use, provide full paths if it's not
+# on your PATH already.
+our $SEVEN_ZIP = 'C:\Program Files\7-Zip\7z.exe';
+our $CMAKE = 'cmake';
+our $NMAKE = 'nmake';
+# Use the .com version so we get output, the .exe doesn't produce any output
+our $DEVENV = 'devenv.com';
+our $VCUPGRADE = 'vcupgrade';
+our $PYTHON = 'python';
+
+# Versions of the dependencies we will use
+# Change these if you want but these are known to work with
+# this script as is.
+our $HTTPD_VER = '2.4.4';
+our $APR_VER = '1.4.6';
+our $APU_VER = '1.5.2'; # apr-util version
+our $API_VER = '1.2.1'; # apr-iconv version
+our $ZLIB_VER = '1.2.8';
+our $OPENSSL_VER = '1.0.1e';
+our $PCRE_VER = '8.35';
+our $BDB_VER = '5.3.21';
+our $SQLITE_VER = '3071602'; # dotless form used in sqlite amalgamation names
+our $SERF_VER = '1.3.6';
+our $NEON_VER = '0.29.6';
+
+# Sources for files to download
+our $AWK_URL = 'http://www.cs.princeton.edu/~bwk/btl.mirror/awk95.exe';
+our $HTTPD_URL;
+our $APR_URL;
+our $APU_URL;
+our $API_URL;
+our $ZLIB_URL;
+our $OPENSSL_URL;
+our $PCRE_URL;
+our $BDB_URL;
+our $SQLITE_URL;
+our $SERF_URL;
+our $NEON_URL;
+our $PROJREF_URL = 'https://downloads.redhoundsoftware.com/blog/ProjRef.py';
+
+# Location of the already downloaded file.
+# by default these are undefined and set by the downloader.
+# However, they can be overridden from the commandline and then
+# the downloader is skipped. Note that BDB has no downloader
+# so it must be overridden from the command line.
+our $AWK_FILE;
+our $HTTPD_FILE;
+our $APR_FILE;
+our $APU_FILE;
+our $API_FILE;
+our $ZLIB_FILE;
+our $OPENSSL_FILE;
+our $PCRE_FILE;
+our $BDB_FILE;
+our $SQLITE_FILE;
+our $SERF_FILE;
+our $NEON_FILE;
+our $PROJREF_FILE;
+
+# Various directories we use
+our $TOPDIR = Cwd::cwd(); # top of our tree
+our $INSTDIR; # where we install to
+our $BLDDIR; # directory where we actually build
+our $SRCDIR; # directory where we store package files
+
+# Some other options
+our $VS_VER; # Visual Studio year; detected by check_vs_ver() when unset
+our $NEON; # build neon or not; defaulted from $SVN_VER in set_svn_ver_defaults()
+our $SVN_VER = '1.9.x';
+our $DEBUG = 0; # 1 = debug builds of all dependencies
+
+# Utility function: strip every '.' from a string (e.g. '1.2.8' -> '128'),
+# used to build dot-less file names from version numbers.
+sub remove_dots {
+  my $str = shift;
+
+  (my $stripped = $str) =~ tr/.//d;
+  return $stripped;
+}
+
+# Assign $value to the scalar referenced by $var, but only when it has
+# not already been defined (e.g. by a command-line override).
+sub set_default {
+  my ($var, $value) = @_;
+
+  return if defined($$var);
+  $$var = $value;
+}
+
+# Derive version-dependent defaults from $SVN_VER: only default $NEON on
+# for Subversion older than 1.8.  A user-supplied NEON=... wins.
+sub set_svn_ver_defaults {
+  my ($major, $minor) = $SVN_VER =~ /^(\d+)\.(\d+)\.(.+)$/;
+
+  return if defined($NEON);
+  my $is_1_8_or_newer = ($major > 1 or ($major == 1 and $minor >= 8));
+  $NEON = $is_1_8_or_newer ? 0 : 1;
+}
+
+# Any variables with defaults that reference other values
+# should be set here. This defers setting of the default until runtime
+# so that command-line overrides of the referenced variables are honored.
+sub set_defaults {
+  set_default(\$HTTPD_URL, "http://archive.apache.org/dist/httpd/httpd-$HTTPD_VER.tar.bz2");
+  set_default(\$APR_URL, "http://archive.apache.org/dist/apr/apr-$APR_VER.tar.bz2");
+  set_default(\$APU_URL, "http://archive.apache.org/dist/apr/apr-util-$APU_VER.tar.bz2");
+  set_default(\$API_URL, "http://archive.apache.org/dist/apr/apr-iconv-$API_VER.tar.bz2");
+  set_default(\$ZLIB_URL, "http://sourceforge.net/projects/libpng/files/zlib/$ZLIB_VER/zlib" . remove_dots($ZLIB_VER) . '.zip');
+  set_default(\$OPENSSL_URL, "http://www.openssl.org/source/openssl-$OPENSSL_VER.tar.gz");
+  set_default(\$PCRE_URL, "ftp://ftp.csx.cam.ac.uk/pub/software/programming/pcre/pcre-$PCRE_VER.zip");
+  # Use $BDB_VER like every other URL default so a BDB_VER override from
+  # the command line is honored (the version was hard-coded to 5.3.21 here).
+  set_default(\$BDB_URL, "http://download.oracle.com/berkeley-db/db-$BDB_VER.zip");
+  # NOTE(review): the /2013/ path component is tied to the release year of
+  # $SQLITE_VER -- confirm when bumping the sqlite version.
+  set_default(\$SQLITE_URL, "http://www.sqlite.org/2013/sqlite-amalgamation-$SQLITE_VER.zip");
+  set_default(\$SERF_URL, "https://archive.apache.org/dist/serf/serf-$SERF_VER.zip");
+  set_default(\$NEON_URL, "http://www.webdav.org/neon/neon-$NEON_VER.tar.gz");
+  set_default(\$INSTDIR, $TOPDIR);
+  set_default(\$BLDDIR, "$TOPDIR\\build");
+  set_default(\$SRCDIR, "$TOPDIR\\sources");
+  set_svn_ver_defaults();
+}
+
+#################################
+###### M A I N ######
+#################################
+# You shouldn't have any reason to modify below this unless you've changed
+# versions of something.
+package main;
+
+use warnings;
+use strict;
+
+use LWP::Simple;
+use File::Path;
+use File::Copy;
+use File::Basename;
+use File::Find;
+use Cwd;
+use Config;
+
+# Full path to perl, this shouldn't need to be messed with
+my $PERL = $Config{perlpath};
+
+# Directory constants that we setup for convenience, but that
+# shouldn't be changed since they are assumed in the build systems
+# of the various dependencies.
+my $HTTPD; # Where httpd gets built
+my $BDB; # Where bdb gets built
+my $BINDIR; # where binaries are installed
+my $LIBDIR; # where libraries are installed
+my $INCDIR; # where headers are installed
+my $SRCLIB; # httpd's srclib dir
+
+# defer setting these values till runtime so users can override the
+# user controlled vars they derive from.
+sub set_paths {
+  $HTTPD = "$BLDDIR\\httpd";
+  $BDB = "$BLDDIR\\bdb";
+  $BINDIR = "$INSTDIR\\bin";
+  $LIBDIR = "$INSTDIR\\lib";
+  $INCDIR = "$INSTDIR\\include";
+  # httpd expects its bundled deps (apr, zlib, openssl, pcre) under srclib.
+  $SRCLIB = "$HTTPD\\srclib";
+  # Add bin to PATH this will be needed for at least awk later on
+  $ENV{PATH} = "$BINDIR;$ENV{PATH}";
+  # Setup LIB and INCLUDE so we can find BDB
+  $ENV{LIB} = "$LIBDIR;$ENV{LIB}";
+  $ENV{INCLUDE} = "$INCDIR;$ENV{INCLUDE}";
+}
+
+#####################
+# UTILITY FUNCTIONS #
+#####################
+
+# Copy $src to $dest, aborting the whole build on failure.
+sub copy_or_die {
+  my ($src, $dest) = @_;
+
+  copy($src, $dest)
+    or die "Failed to copy $src to $dest: $!";
+}
+
+# Rename (move) $src to $dest, aborting the whole build on failure.
+sub rename_or_die {
+  my ($src, $dest) = @_;
+
+  rename($src, $dest)
+    or die "Failed to rename $src to $dest: $!";
+}
+
+# Change the working directory, aborting the whole build on failure.
+sub chdir_or_die {
+  my ($dir) = @_;
+
+  chdir($dir)
+    or die "Failed to chdir to $dir: $!";
+}
+
+# Run an external command via system(), dying when it exits non-zero.
+# The first argument is an optional error message; the remaining
+# arguments are passed straight through to system().
+sub system_or_die {
+  my $error_msg = shift;
+
+  return if system(@_) == 0;
+
+  die "$error_msg (exit code: $?)" if defined($error_msg);
+  die "Failed while running '@_' (exit code: $?)";
+}
+
+# Like perl -pi.orig: rewrite $file line by line through $func, which
+# reads and writes the current line via $_ (modifying $_ changes the
+# line written back).  The original file is kept as a backup with
+# extension $bak (default '.orig'); when that backup already exists the
+# rewrite is assumed to have been done before and is skipped entirely.
+sub modify_file_in_place {
+  my ($file, $func, $bak) = @_;
+
+  $bak = '.orig' unless defined($bak);
+
+  my $backup = $file . $bak;
+  return if -e $backup;
+
+  rename_or_die($file, $backup);
+  open(my $in, "<$backup") or die "Failed to open $backup: $!";
+  open(my $out, ">$file") or die "Failed to open $file: $!";
+  while (<$in>) {
+    &{$func}();
+    print $out $_;
+  }
+  close($in);
+  close($out);
+}
+
+# Determine which Visual Studio we are running under and set $VS_VER.
+# Probes the vcupgrade tool because its name and version numbering are
+# consistent across releases, including the Express editions.
+sub check_vs_ver {
+  return if defined($VS_VER);
+
+  my %vs_year_for = ('12' => '2013', '11' => '2012', '10' => '2010');
+
+  my $help_output = `"$VCUPGRADE" /?`;
+  my ($major_version) = $help_output =~ /Version (\d+)\./s;
+
+  if (defined($major_version) and exists($vs_year_for{$major_version})) {
+    $VS_VER = $vs_year_for{$major_version};
+    return;
+  }
+
+  die("Visual Studio Version Not Supported");
+}
+
+##################
+# TREE STRUCTURE #
+##################
+
+# Create the directories this script directly needs.  mkdir failures are
+# deliberately ignored because the directories may already exist.
+sub prepare_structure {
+  mkdir($_) for ($BINDIR, $SRCDIR, $BLDDIR, $LIBDIR, $INCDIR);
+}
+
+# Remove paths created by this script (directly or indirectly).
+# When the first argument is true the downloaded source archives are
+# removed as well; otherwise they are kept for the next run.
+# Errors are ignored throughout since the paths may not exist.
+sub clean_structure {
+  my $real_clean = shift;
+
+  rmtree($SRCDIR) if $real_clean;
+
+  # Dirs this script creates or extracts into directly.
+  rmtree($_) for ($BINDIR, $BLDDIR, $INCDIR, $LIBDIR,
+                  "$INSTDIR\\serf", "$INSTDIR\\neon",
+                  "$INSTDIR\\sqlite-amalgamation");
+
+  # Dirs created indirectly by the install targets.
+  rmtree("$INSTDIR\\$_") for qw(man share ssl cgi-bin conf error htdocs
+                                icons logs manual modules);
+
+  # Stray top-level doc files dropped by the installs.
+  unlink("$INSTDIR\\$_.txt") for qw(ABOUT_APACHE CHANGES INSTALL LICENSE
+                                    NOTICE OPENSSL-NEWS OPENSSL-README
+                                    README);
+}
+
+############
+# DOWNLOAD #
+############
+
+# Download a url into a file; if successful put the destination into the
+# variable referenced by $dest_ref.
+sub download_file {
+  my $url = shift;
+  my $file = shift;
+  my $dest_ref = shift;
+
+  # If the variable referenced by $dest_ref is already set, skip downloading;
+  # means we've been asked to use an already downloaded file.
+  return if (defined($$dest_ref));
+
+  print "Downloading $url\n";
+  # Using mirror() here so that repeated runs shouldn't try to keep downloading
+  # the file.  mirror() is from LWP::Simple and returns an HTTP status code,
+  # which is_error() (same module) classifies.
+  my $response = mirror($url, $file);
+  if (is_error($response)) {
+    die "Couldn't save $url to $file received $response";
+  }
+  $$dest_ref = $file;
+}
+
+# Download all the dependencies we need.  Each download_file() call sets
+# the corresponding *_FILE variable unless the user already provided it.
+sub download_dependencies {
+  # putting awk in sources is a bit of a hack but it lets us
+  # avoid having to figure out what to delete when cleaning bin
+  download_file($AWK_URL, "$SRCDIR\\awk.exe", \$AWK_FILE);
+  unless(-x "$BINDIR\\awk.exe") { # skip the copy if it exists
+    copy_or_die($AWK_FILE, "$BINDIR\\awk.exe");
+  }
+  download_file($PROJREF_URL, "$SRCDIR\\ProjRef.py", \$PROJREF_FILE);
+  # NOTE(review): -x is unlikely to be true for a .py file on Windows, which
+  # would make this copy run on every invocation -- confirm.
+  unless(-x "$BINDIR\\ProjRef.py") { # skip the copy if it exists
+    copy_or_die($PROJREF_FILE, $BINDIR);
+  }
+  download_file($BDB_URL, "$SRCDIR\\db.zip", \$BDB_FILE);
+  download_file($ZLIB_URL, "$SRCDIR\\zlib.zip", \$ZLIB_FILE);
+  download_file($OPENSSL_URL, "$SRCDIR\\openssl.tar.gz", \$OPENSSL_FILE);
+  download_file($HTTPD_URL, "$SRCDIR\\httpd.tar.bz2", \$HTTPD_FILE);
+  download_file($APR_URL, "$SRCDIR\\apr.tar.bz2", \$APR_FILE);
+  download_file($APU_URL, "$SRCDIR\\apr-util.tar.bz2", \$APU_FILE);
+  download_file($API_URL, "$SRCDIR\\apr-iconv.tar.bz2", \$API_FILE);
+  download_file($PCRE_URL, "$SRCDIR\\pcre.zip", \$PCRE_FILE);
+  download_file($SQLITE_URL, "$SRCDIR\\sqlite-amalgamation.zip", \$SQLITE_FILE);
+  download_file($SERF_URL, "$SRCDIR\\serf.zip", \$SERF_FILE);
+  download_file($NEON_URL, "$SRCDIR\\neon.tar.gz", \$NEON_FILE) if $NEON;
+}
+
+##############
+# EXTRACTION #
+##############
+
+# Extract a compressed file with 7-zip into a given directory
+# Skip extraction if destination of rename_to or expected_name exists
+# if rename_to is set rename the path from expected_name to rename_to
+sub extract_file {
+  my $file = shift;
+  my $container = shift;      # directory to extract into (7-zip -o option)
+  my $expected_name = shift;  # path the archive is expected to produce
+  my $rename_to = shift;      # optional final name for that path
+
+  if (defined($rename_to)) {
+    return if -d $rename_to;
+  } elsif (defined($expected_name)) {
+    return if -d $expected_name;
+  }
+
+  my $dest_opt = "";
+  if (defined($container)) {
+    $dest_opt = qq(-o"$container" );
+  }
+
+  my $cmd;
+  if ($file =~ /\.tar\.(bz2|gz)$/) {
+    # 7-zip only peels one layer at a time, so decompress to stdout and
+    # pipe into a second 7-zip that unpacks the tar stream.
+    $cmd = qq("$SEVEN_ZIP" x "$file" -so | "$SEVEN_ZIP" x -y -si -ttar $dest_opt);
+  } else {
+    $cmd = qq("$SEVEN_ZIP" x -y $dest_opt $file);
+  }
+
+  system_or_die("Problem extracting $file", $cmd);
+  if (defined($rename_to)) {
+    rename_or_die($expected_name, $rename_to);
+  }
+}
+
+# Unpack every downloaded archive into its expected build location:
+# bdb and httpd under $BLDDIR, httpd's bundled deps under its srclib,
+# and sqlite/serf/neon directly under $INSTDIR.
+sub extract_dependencies {
+  extract_file($BDB_FILE, $BLDDIR,
+               "$BLDDIR\\db-$BDB_VER", "$BLDDIR\\bdb");
+  extract_file($HTTPD_FILE, $BLDDIR,
+               "$BLDDIR\\httpd-$HTTPD_VER", "$BLDDIR\\httpd");
+  extract_file($APR_FILE, $SRCLIB,
+               "$SRCLIB\\apr-$APR_VER", "$SRCLIB\\apr");
+  extract_file($APU_FILE, $SRCLIB,
+               "$SRCLIB\\apr-util-$APU_VER", "$SRCLIB\\apr-util");
+  extract_file($API_FILE, $SRCLIB,
+               "$SRCLIB\\apr-iconv-$API_VER", "$SRCLIB\\apr-iconv");
+  # We fix the line endings before putting the non-Apache deps in place since it
+  # touches everything under httpd and there's no point in doing other things.
+  httpd_fix_lineends();
+  extract_file($ZLIB_FILE, $SRCLIB,
+               "$SRCLIB\\zlib-$ZLIB_VER", "$SRCLIB\\zlib");
+  extract_file($OPENSSL_FILE, $SRCLIB,
+               "$SRCLIB\\openssl-$OPENSSL_VER", "$SRCLIB\\openssl");
+  extract_file($PCRE_FILE, $SRCLIB,
+               "$SRCLIB\\pcre-$PCRE_VER", "$SRCLIB\\pcre");
+  extract_file($SQLITE_FILE, $INSTDIR,
+               "$INSTDIR\\sqlite-amalgamation-$SQLITE_VER",
+               "$INSTDIR\\sqlite-amalgamation");
+  extract_file($SERF_FILE, $INSTDIR,
+               "$INSTDIR\\serf-$SERF_VER", "$INSTDIR\\serf");
+  extract_file($NEON_FILE, $INSTDIR,
+               "$INSTDIR\\neon-$NEON_VER", "$INSTDIR\\neon") if $NEON;
+}
+
+#########
+# BUILD #
+#########
+
+# Configure (CMake/NMake generator), build, test and install pcre
+# into $INSTDIR as a shared library.
+sub build_pcre {
+  chdir_or_die("$SRCLIB\\pcre");
+  my $pcre_generator = 'NMake Makefiles';
+  # Have to use RelWithDebInfo since httpd looks for the pdb files
+  my $pcre_build_type = '-DCMAKE_BUILD_TYPE:STRING=' . ($DEBUG ? 'Debug' : 'RelWithDebInfo');
+  my $pcre_options = '-DPCRE_NO_RECURSE:BOOL=ON';
+  my $pcre_shared_libs = '-DBUILD_SHARED_LIBS:BOOL=ON';
+  my $pcre_install_prefix = "-DCMAKE_INSTALL_PREFIX:PATH=$INSTDIR";
+  my $cmake_cmd = qq("$CMAKE" -G "$pcre_generator" "$pcre_build_type" "$pcre_shared_libs" "$pcre_install_prefix" "$pcre_options" .);
+  system_or_die("Failure generating pcre Makefiles", $cmake_cmd);
+  system_or_die("Failure building pcre", qq("$NMAKE"));
+  system_or_die("Failure testing pcre", qq("$NMAKE" test));
+  system_or_die("Failure installing pcre", qq("$NMAKE" install));
+  chdir_or_die($TOPDIR);
+}
+
+# This is based roughly off the build_zlib.bat that the Subversion Windows
+# build generates; if it doesn't match that then Subversion will fail to build.
+sub build_zlib {
+  chdir_or_die("$SRCLIB\\zlib");
+  # /O2 (letter O) optimizes for speed; the previous '/02' (digit zero)
+  # was a typo that cl.exe rejects as an unknown option.
+  $ENV{CC_OPTS} = $DEBUG ? '/MDd /Gm /ZI /Od /GZ /D_DEBUG' : '/MD /O2 /Zi';
+  $ENV{COMMON_CC_OPTS} = '/nologo /W3 /DWIN32 /D_WINDOWS';
+
+  system_or_die("Failure building zlib", qq("$NMAKE" /nologo -f win32\\Makefile.msc STATICLIB=zlibstat.lib all));
+
+  # Don't leak the compiler option overrides into later builds.
+  delete $ENV{CC_OPTS};
+  delete $ENV{COMMON_CC_OPTS};
+
+  chdir_or_die($TOPDIR);
+}
+
+# Configure, build, test and install OpenSSL (x86, no assembler) into
+# $INSTDIR using the ms\ntdll.mak DLL build.
+sub build_openssl {
+  chdir_or_die("$SRCLIB\\openssl");
+
+  # We're building openssl without an assembler. If someone wants to
+  # use this for production they should probably download NASM and
+  # remove the no-asm below and use ms\do_nasm.bat instead.
+
+  # TODO: Enable openssl to use zlib. openssl needs some patching to do
+  # this since it wants to look for zlib as zlib1.dll and as the httpd
+  # build instructions note you probably don't want to dynamic link zlib.
+
+  # TODO: OpenSSL requires perl on the path since it uses perl without a full
+  # path in the batch file and the makefiles. Probably should determine
+  # if PERL is on the path and add it here if not.
+
+  # The apache build docs suggest no-rc5 no-idea enable-mdc2 on top of what
+  # is used below, the primary driver behind that is patents, but I believe
+  # the rc5 and idea patents have expired.
+  my $platform = $DEBUG ? 'debug-VC-WIN32' : 'VC-WIN32';
+  system_or_die("Failure configuring openssl",
+                qq("$PERL" Configure no-asm "--prefix=$INSTDIR" $platform));
+  system_or_die("Failure building openssl (bat)", 'ms\do_ms.bat');
+  system_or_die("Failure building openssl (nmake)", qq("$NMAKE" /f ms\\ntdll.mak));
+  system_or_die("Failure testing openssl", qq("$NMAKE" /f ms\\ntdll.mak test));
+  system_or_die("Failure installing openssl",
+                qq("$NMAKE" /f ms\\ntdll.mak install));
+  chdir_or_die($TOPDIR);
+}
+
+# Run devenv /Upgrade on file.
+# If the file isn't a .sln file and the sln file isn't empty shortcut this
+# If the file isn't a .sln file touch the basename.sln of file to avoid
+# Visual Studio whining about its backup step.
+# When $interactive is true the IDE is launched without /Upgrade and the
+# user is expected to perform the conversion by hand.
+sub upgrade_solution {
+  my $file = shift;
+  my $interactive = shift;
+  my $flags = "";
+
+  my ($basename, $directories) = fileparse($file, qr/\.[^.]*$/);
+  my $sln = $directories . $basename . '.sln';
+  return if $file ne $sln and -s $sln; # shortcut if sln file is unique and isn't empty
+  # 'touch' the sln file so that Visual Studio 2012
+  # doesn't try to say there was an error while upgrading because
+  # it was unable to backup the original solution file.
+  unless (-e $sln) {
+    open(SLN, ">$sln") or die "Can't create $sln: $!";
+    close(SLN);
+  }
+  print "Upgrading $file (this may take a while)\n";
+  $flags = " /Upgrade" unless $interactive;
+  system_or_die("Failure upgrading $file", qq("$DEVENV" "$file"$flags));
+  if ($interactive) {
+    print "Can't do automatic upgrade, doing interactive upgrade\n";
+    print "IDE will load, choose to convert all projects, exit the IDE and\n";
+    print "save the resulting solution file\n\n";
+    print "Press Enter to Continue\n";
+    <>;
+  }
+}
+
+# Run the lineends.pl script
+sub httpd_fix_lineends {
+  chdir_or_die($HTTPD);
+  # This script fixes the lineendings to be CRLF in appropriate files.
+  # If we don't run this script then the DSW Upgrade will fail.
+  # (undef message => system_or_die's generic failure text is used.)
+  system_or_die(undef, qq("$PERL" "$SRCLIB\\apr\\build\\lineends.pl"));
+  chdir_or_die($TOPDIR);
+}
+
+# The httpd makefile in 2.4.4 doesn't know about .vcxproj files and
+# still thinks it's got an older version of Visual Studio because
+# .vcproj files have become .vcxproj.
+sub httpd_fix_makefile {
+  my $file = shift;
+
+  modify_file_in_place($file, sub {
+    s/\.vcproj/.vcxproj/i;
+    # below fixes that installd breaks when trying to install pcre because
+    # dll is named pcred.dll when a Debug build.
+    # (Wraps the original copy line in !IF EXISTS guards and adds an
+    # equivalent copy for pcred.$(src_dll).)
+    s/^(\s*copy srclib\\pcre\\pcre\.\$\(src_dll\)\s+"\$\(inst_dll\)"\s+<\s*\.y\s*)$/!IF EXISTS("srclib\\pcre\\pcre\.\$(src_dll)")\n$1!ENDIF\n!IF EXISTS("srclib\\pcre\\pcred\.\$(src_dll)")\n\tcopy srclib\\pcre\\pcred.\$(src_dll)\t\t\t"\$(inst_dll)" <.y\n!ENDIF\n/;
+  });
+}
+
+# Poor man's way of inserting a property group into a vcxproj file:
+# splice <PropertyGroup>$xml</PropertyGroup> in front of the closing
+# </Project> tag.  Assumes that tag sits alone on its line with no
+# surrounding whitespace -- not universally valid, but it holds for the
+# files we touch here.  $bak optionally overrides the backup extension.
+sub insert_property_group {
+  my ($file, $xml, $bak) = @_;
+
+  modify_file_in_place($file, sub {
+      s#(^</Project>$)#<PropertyGroup>$xml</PropertyGroup>\n$1#i;
+    }, $bak);
+}
+
+# Strip pre-compiled header flags (of the form /Ycfoo.h or /Yufoo.h)
+# from the compile and linker options stored in $file.
+sub disable_pch {
+  my ($file) = @_;
+
+  modify_file_in_place($file, sub {
+      s#/Y[cu][^ ]+##;
+    });
+}
+
+# Find the first .exe, .dll or .so <OutputFile> entry in the project
+# file given.  There may be macros or paths in the result.  Returns
+# undef when no such entry exists.
+sub get_output_file {
+  my $file = shift;
+  my $result;
+  local $_; # Don't mess with the $_ from the find callback
+
+  open(IN, "<$file") or die "Couldn't open file $file: $!";
+  while (<IN>) {
+    # 'exe' was previously misspelled 'exec', so .exe output files could
+    # never match despite the stated purpose of this sub.
+    if (m#<OutputFile>(.*?\.(?:exe|dll|so))</OutputFile>#) {
+      $result = $1;
+      last;
+    }
+  }
+  close(IN);
+  return $result;
+}
+
+# Find the file name of the BDB library installed into our $LIBDIR
+# (libdb<NN>.lib, or libdb<NN>d.lib for debug builds); undef if absent.
+sub find_bdb_lib {
+  my $found;
+  my $dbg_suffix = $DEBUG ? 'd' : '';
+  find(sub {
+      $found = $_ if !defined($found) && /^libdb\d+$dbg_suffix\.lib$/;
+    }, $LIBDIR);
+  return $found;
+}
+
+# Add library dependency $dep to the linker inputs of project $file by
+# prepending it to %(AdditionalDependencies).  $bak optionally overrides
+# the backup extension made of the project.
+sub insert_dependency_in_proj {
+  my ($file, $dep, $bak) = @_;
+
+  modify_file_in_place($file, sub {
+      s/(%\(AdditionalDependencies\))/$dep;$1/;
+    }, $bak);
+}
+
+# Do what's needed to enable BDB in the httpd and apr-util builds
+sub httpd_enable_bdb {
+  # Make APU_HAVE_DB be true so the code builds.
+  modify_file_in_place('srclib\apr-util\include\apu.hw', sub {
+    s/(#define\s+APU_HAVE_DB\s+)0/${1}1/;
+  });
+
+  # Fix the linkage, apr_dbm_db is hardcoded to libdb47.lib
+  # (swap in whatever libdb<NN>[d].lib we actually installed).
+  my $bdb_lib = find_bdb_lib();
+  modify_file_in_place('srclib\apr-util\dbm\apr_dbm_db.vcxproj', sub {
+    s/libdb\d+\.lib/$bdb_lib/g;
+  }, '.bdb');
+
+  # httxt2dbm and htdbm need a BDB dependency and don't have one.
+  insert_dependency_in_proj('support\httxt2dbm.vcxproj', $bdb_lib, '.bdb');
+  insert_dependency_in_proj('support\htdbm.vcxproj', $bdb_lib, '.bdb');
+}
+
+# Apply the same fix as found in r1486937 on httpd 2.4.x branch.
+# Only applies to httpd releases older than 2.4.5; inserts the
+# "Win32 Lexical" target marker lines into libhttpd.dsp.
+sub httpd_fix_debug {
+  my ($httpd_major, $httpd_minor, $httpd_patch) = $HTTPD_VER =~ /^(\d+)\.(\d+)\.(.+)$/;
+  return unless ($httpd_major <= 2 && $httpd_minor <= 4 && $httpd_patch < 5);
+
+  modify_file_in_place('libhttpd.dsp', sub {
+    s/^(!MESSAGE "libhttpd - Win32 Debug" \(based on "Win32 \(x86\) Dynamic-Link Library"\))$/$1\n!MESSAGE "libhttpd - Win32 Lexical" (based on "Win32 (x86) Dynamic-Link Library")/;
+    s/^(# Begin Group "headers")$/# Name "libhttpd - Win32 Lexical"\n$1/;
+  }, '.lexical');
+}
+
+# Convert, patch and build httpd together with apr/apr-util/apr-iconv,
+# then install everything into $INSTDIR via Makefile.win.
+sub build_httpd {
+  chdir_or_die($HTTPD);
+
+  my $vs_2013 = $VS_VER eq '2013';
+  my $vs_2012 = $VS_VER eq '2012';
+  my $vs_2010 = $VS_VER eq '2010';
+
+  httpd_fix_debug();
+
+  # I don't think cvtdsp.pl is necessary with Visual Studio 2012
+  # but it shouldn't hurt anything either. Including it allows
+  # for the possibility that this may work for older Visual Studio
+  # versions.
+  system_or_die("Failure converting DSP files",
+                qq("$PERL" srclib\\apr\\build\\cvtdsp.pl -2005));
+
+  upgrade_solution('Apache.dsw', $vs_2010);
+  httpd_enable_bdb();
+  httpd_fix_makefile('Makefile.win');
+
+  # Modules and support projects randomly fail due to an error about the
+  # CL.read.1.tlog file already existing. This is really because of the
+  # intermediate dirs being shared between modules, but for the time being
+  # this works around it.
+  find(sub {
+         if (/\.vcxproj$/) {
+           insert_property_group($_, '<TrackFileAccess>false</TrackFileAccess>')
+         }
+       }, 'modules', 'support');
+
+  if ($vs_2012 or $vs_2013) {
+    # Turn off pre-compiled headers for apr-iconv to avoid:
+    # LNK2011: http://msdn.microsoft.com/en-us/library/3ay26wa2(v=vs.110).aspx
+    disable_pch('srclib\apr-iconv\build\modules.mk.win');
+
+    # ApacheMonitor build fails due a duplicate manifest, turn off
+    # GenerateManifest
+    insert_property_group('support\win32\ApacheMonitor.vcxproj',
+                          '<GenerateManifest>false</GenerateManifest>',
+                          '.dupman');
+
+    # The APR libraries have projects named libapr but produce output named libapr-1
+    # The problem with this is in newer versions of Visual Studio TargetName defaults
+    # to the project name and not the basename of the output. Since the PDB file
+    # is named based on the TargetName the pdb file ends up being named libapr.pdb
+    # instead of libapr-1.pdb. The below call fixes this by explicitly providing
+    # a TargetName definition and shuts up some warnings about this problem as well.
+    # Without this fix the install fails when it tries to copy libapr-1.pdb.
+    # See this thread for details of the changes:
+    # http://social.msdn.microsoft.com/Forums/en-US/vcprerelease/thread/3c03e730-6a0e-4ee4-a0d6-6a5c3ce4343c
+    find(sub {
+           return unless (/\.vcxproj$/);
+           my $output_file = get_output_file($_);
+           return unless (defined($output_file));
+           my ($project_name) = fileparse($_, qr/\.[^.]*$/);
+           my ($old_style_target_name) = fileparse($output_file, qr/\.[^.]*$/);
+           return if ($old_style_target_name eq $project_name);
+           insert_property_group($_,
+                                 "<TargetName>$old_style_target_name</TargetName>", '.torig');
+         }, "$SRCLIB\\apr", "$SRCLIB\\apr-util", "$SRCLIB\\apr-iconv");
+  } elsif ($vs_2010) {
+    # Fixed: a stray '"' after Apache.sln used to leak a literal quote
+    # into the generated command line.
+    system_or_die("Failed fixing project guid references",
+                  qq("$PYTHON" "$BINDIR\\ProjRef.py" -i Apache.sln));
+  }
+
+  # If you're looking here it's possible that something went
+  # wrong with the httpd build. Debugging it can be a bit of a pain
+  # when using this script. There are log files created in the
+  # Release dirs named with the same basename as the project. E.G.
+  # for support\httxt2dbm.vcxproj you can find the log in
+  # support\Release\httxt2dbm.log. You can also run a similar build
+  # from in the IDE, but you'll need to disable some projects since
+  # they are separately driven by the Makefile.win. Grepping for
+  # '/project' in Makefile.win should tell you which projects. You'll
+  # also need to add the bin, include and lib paths to the appropriate
+  # configurations inside the project since we get them from the environment.
+  # Once all that is done the BuildBin project should be buildable for you to
+  # diagnose the problem.
+  my $target = $DEBUG ? "installd" : "installr";
+  system_or_die("Failed building/installing httpd/apr/apu/api",
+                qq("$NMAKE" /f Makefile.win $target "DBM_LIST=db" "INSTDIR=$INSTDIR"));
+
+  chdir_or_die($TOPDIR);
+}
+
+# Upgrade and build the Berkeley DB solution with devenv, then hand-copy
+# the headers, binaries and libs into our install tree.
+sub build_bdb {
+  chdir_or_die($BDB);
+
+  # Progress aid: show which directory we're building in.
+  print(cwd(),$/);
+  my $sln = 'build_windows\Berkeley_DB_vs2010.sln';
+  upgrade_solution($sln);
+
+  my $platform = $DEBUG ? 'Debug|Win32' : 'Release|Win32';
+
+  # Build the db Project first since the full solution fails due to a broken
+  # dependency with the current version of BDB if we don't.
+  # NOTE(review): the error messages say "DBD"; presumably "BDB" was meant.
+  system_or_die("Failed building DBD (Project db)",
+                qq("$DEVENV" "$sln" /Build "$platform" /Project db));
+
+  system_or_die("Failed building DBD",
+                qq("$DEVENV" "$sln" /Build "$platform"));
+
+  # BDB doesn't seem to have its own install routines so we'll do it ourselves
+  copy_or_die('build_windows\db.h', $INCDIR);
+  find(sub {
+         if (/\.(exe|dll|pdb)$/) {
+           copy_or_die($_, $BINDIR);
+         } elsif (/\.lib$/) {
+           copy_or_die($_, $LIBDIR);
+         }
+       }, 'build_windows\\Win32\\' . ($DEBUG ? 'Debug' : 'Release'));
+
+  chdir_or_die($TOPDIR);
+}
+
+# Right now this doesn't actually build serf but just patches it so that it
+# can build against a debug build of OpenSSL.
+sub build_serf {
+  chdir_or_die("$TOPDIR\\serf");
+
+  # Introduce an OPENSSL_OUT_SUFFIX variable into serf.mak: empty for
+  # Release, '.dbg' for Debug, and append it to the out32[dll] paths
+  # (presumably matching a debug OpenSSL output layout -- confirm).
+  modify_file_in_place('serf.mak', sub {
+    s/^(INTDIR = Release)$/$1\nOPENSSL_OUT_SUFFIX =/;
+    s/^(INTDIR = Debug)$/$1\nOPENSSL_OUT_SUFFIX = .dbg/;
+    s/(\$\(OPENSSL_SRC\)\\out32(?:dll)?)/$1\$(OPENSSL_OUT_SUFFIX)/g;
+  }, '.debug');
+
+  chdir_or_die($TOPDIR);
+}
+
+# Build every dependency, in the order the individual steps require
+# (bdb/zlib/pcre/openssl before serf and httpd).
+sub build_dependencies {
+  foreach my $step (\&build_bdb, \&build_zlib, \&build_pcre,
+                    \&build_openssl, \&build_serf, \&build_httpd) {
+    $step->();
+  }
+}
+
+###############
+# COMMANDLINE #
+###############
+
+# Implement an interface somewhat similar to the make command line
+# You can give a list of commands and variable assignments interspersed.
+# Variable assignments are always VAR=VALUE with no spaces (in a single
+# argv entry).
+sub main {
+  my @commands;
+  while (my $arg = shift @ARGV) {
+    # Look for variable assignment
+    if (my ($lhs, $rhs) = $arg =~ /([^=]+)=(.*)/) {
+      # Bit of hackery to allow the global values in the
+      # Vars package to be overriden from the command line.
+      # E.G. "CMAKE=C:\CMake\cmake.exe" would replace the
+      # default value with this value.
+      # (Looks the name up in the Vars package symbol table.)
+      if (exists($Vars::{$lhs})) {
+        ${$Vars::{$lhs}} = $rhs;
+      } else {
+        # Don't allow variables that don't exist already to be touched.
+        die "$lhs is an unknown variable.";
+      }
+    } else {
+      # Not a variable so must be a command
+      push @commands, $arg;
+    }
+  }
+
+  # No commands so add the implicit all command
+  if ($#commands == -1) {
+    push @commands, 'all';
+  }
+
+  # Set defaults and paths that have to be set at runtime since they are based
+  # on other variables.
+  Vars::set_defaults();
+  set_paths();
+
+  # Determine the Visual Studio Version and die if not supported.
+  check_vs_ver();
+
+  # change directory to our TOPDIR before running any commands
+  # the variable assignment might have changed it.
+  chdir_or_die($TOPDIR);
+
+  # Run the commands in the order given.
+  foreach my $command (@commands) {
+    if ($command eq 'clean') {
+      clean_structure(0);
+    } elsif ($command eq 'real-clean') {
+      clean_structure(1);
+    } elsif ($command eq 'prepare') {
+      prepare_structure();
+    } elsif ($command eq 'download') {
+      download_dependencies();
+    } elsif ($command eq 'extract') {
+      extract_dependencies();
+    } elsif ($command eq 'all') {
+      prepare_structure();
+      download_dependencies();
+      extract_dependencies();
+      build_dependencies();
+    } else {
+      die "Command '$command' is unknown";
+    }
+  }
+}
+
+# Script entry point.
+main();
diff --git a/tools/dev/check-license.py b/tools/dev/check-license.py
new file mode 100755
index 0000000..37041be
--- /dev/null
+++ b/tools/dev/check-license.py
@@ -0,0 +1,142 @@
+#!/usr/bin/env python
+#
+# check if a file has the proper license in it
+#
+# USAGE: check-license.py [-C] file1 file2 ... fileN
+#
+# A 'file' may in fact be a directory, in which case it is recursively
+# searched.
+#
+# If the license cannot be found, then the filename is printed to stdout.
+# Typical usage:
+# $ check-license.py . > bad-files
+#
+# -C switch is used to change licenses.
+# Typical usage:
+# $ check-license.py -C file1 file2 ... fileN
+#
+
+import sys, os, re
+
+# Note: Right now, OLD_LICENSE and NEW_LICENSE are the same, because
+# r878444 updated all the license blocks. In the future, if we update
+# the license block again, change just NEW_LICENSE and use this script.
+
# OLD_LICENSE is regular-expression *source text*, not a literal: the
# C-comment decoration characters (*, parens, dots) are backslash-escaped
# so they match themselves when the pattern is compiled below (re_OLD).
OLD_LICENSE = '''\
 \* ====================================================================
 \* Licensed to the Subversion Corporation \(SVN Corp\.\) under one
 \* or more contributor license agreements\. See the NOTICE file
 \* distributed with this work for additional information
 \* regarding copyright ownership\. The SVN Corp\. licenses this file
 \* to you under the Apache License, Version 2\.0 \(the
 \* "License"\); you may not use this file except in compliance
 \* with the License\. You may obtain a copy of the License at
 \*
 \* http://www\.apache\.org/licenses/LICENSE-2\.0
 \*
 \* Unless required by applicable law or agreed to in writing,
 \* software distributed under the License is distributed on an
 \* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
 \* KIND, either express or implied\. See the License for the
 \* specific language governing permissions and limitations
 \* under the License\.
 \* ====================================================================
'''

# Shell-comment variant of the old license: each line's leading " \*"
# (escaped star) is rewritten to "#" so the same block can be matched
# in '#'-commented files.
SH_OLD_LICENSE = re.subn(r'(?m)^ \\\*', '#', OLD_LICENSE)[0]

# Remember not to do regexp quoting for NEW_LICENSE. Only OLD_LICENSE
# is used for matching; NEW_LICENSE is inserted as-is.
NEW_LICENSE = '''\
 * ====================================================================
 * Licensed to the Subversion Corporation (SVN Corp.) under one
 * or more contributor license agreements. See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership. The SVN Corp. licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License. You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing,
 * software distributed under the License is distributed on an
 * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
 * KIND, either express or implied. See the License for the
 * specific language governing permissions and limitations
 * under the License.
 * ====================================================================
'''

# '#'-commented form of the replacement text, for shell-style files.
SH_NEW_LICENSE = re.subn(r'(?m)^ \*', '#', NEW_LICENSE)[0]

# Compiled matchers for the C-style and shell-style old license blocks.
re_OLD = re.compile(OLD_LICENSE)
re_SH_OLD = re.compile(SH_OLD_LICENSE)
# Files containing any of these phrases are generated or deliberately
# unlicensed and are left alone by check_file().
re_EXCLUDE = re.compile(
  r'automatically generated by SWIG'
  + r'|Generated from configure\.in'
  + r'|placed into the public domain'
  )

# File extensions whose license blocks use C-style comments.
c_comment_suffices = ('.c', '.java', '.h', '.cpp', '.hw', '.pas')

# Yes, this is an empty tuple. No types that fit in this category uniformly
# have a copyright block.
# Possible types to add here:
# ('.bat', '.py', '.pl', '.in')
sh_comment_suffices = ()
+
def check_file(fname, old_re, new_lic):
  """Print FNAME to stdout if it lacks a license matching OLD_RE and is
  not in the exclusion list (re_EXCLUDE).

  NEW_LIC is unused here; it is accepted only so check_file() and
  change_license() share a signature and can both serve as visit()'s
  file-processing baton.
  """
  # Use a context manager so the handle is closed promptly instead of
  # being leaked until garbage collection.
  with open(fname) as f:
    s = f.read()
  if not old_re.search(s) and not re_EXCLUDE.search(s):
    print(fname)
+
def change_license(fname, old_re, new_lic):
  """Replace the first OLD_RE match in FNAME with the text NEW_LIC.

  Prints an error (and leaves the file untouched) if the old license
  cannot be found; otherwise rewrites the file in place and reports it.
  """
  with open(fname) as f:
    s = f.read()
  m = old_re.search(s)
  if not m:
    print('ERROR: missing old license: %s' % fname)
    return
  # Splice the new license over exactly the span the old one occupied.
  with open(fname, 'w') as f:
    f.write(s[:m.start()] + new_lic + s[m.end():])
  print('Changed: %s' % fname)
+
def visit(baton, dirname, dircontents):
  """Apply BATON (a file-processing function with check_file's
  signature) to every recognized file in DIRNAME.

  DIRCONTENTS is a list of entry names in DIRNAME; names of directories
  we never want to descend into are removed from it in place so a
  walking caller can prune its recursion.
  """
  file_func = baton
  # Iterate over a snapshot: removing an element from the very list
  # being iterated makes Python skip the entry that follows it (e.g.
  # '.libs' was silently kept whenever it directly followed '.svn').
  for i in list(dircontents):
    # Don't recurse into certain directories
    if i in ('.svn', '.libs'):
      dircontents.remove(i)
      continue

    extension = os.path.splitext(i)[1]
    fullname = os.path.join(dirname, i)

    if os.path.isdir(fullname):
      continue

    # Dispatch on comment style: C-style vs. shell-style license blocks.
    if extension in c_comment_suffices:
      file_func(fullname, re_OLD, NEW_LICENSE)
    elif extension in sh_comment_suffices:
      file_func(fullname, re_SH_OLD, SH_NEW_LICENSE)
+
def main():
  """Entry point: check (default) or, with -C, change licenses in the
  files and directories named on the command line."""
  file_func = check_file
  # Guard the argv access: running with no arguments used to raise
  # IndexError here.
  if len(sys.argv) > 1 and sys.argv[1] == '-C':
    print('Changing license text...')
    del sys.argv[1]
    file_func = change_license

  for f in sys.argv[1:]:
    if os.path.isdir(f):
      # Hand each directory level to visit(); it prunes unwanted
      # directory names from the list it is given.
      for dirpath, dirs, files in os.walk(f):
        visit(file_func, dirpath, dirs + files)
    else:
      # os.path.split() yields a *string* basename; wrap it in a list,
      # because visit() iterates its third argument and would otherwise
      # loop over the name one character at a time.
      dirname, basename = os.path.split(f)
      visit(file_func, dirname, [basename])

if __name__ == '__main__':
  main()
diff --git a/tools/dev/contribulyze.py b/tools/dev/contribulyze.py
new file mode 100755
index 0000000..8afc608
--- /dev/null
+++ b/tools/dev/contribulyze.py
@@ -0,0 +1,767 @@
+#!/usr/bin/env python
+#
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements. See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership. The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied. See the License for the
+# specific language governing permissions and limitations
+# under the License.
+#
+#
+
+# See usage() for details, or run with --help option.
+#
+# .-------------------------------------------------.
+# | "An ad hoc format deserves an ad hoc parser." |
+# `-------------------------------------------------'
+#
+# Some Subversion project log messages include parseable data to help
+# track who's contributing what. The exact syntax is described in
+# http://subversion.apache.org/docs/community-guide/conventions.html#crediting,
+# but here's an example, indented by three spaces, i.e., the "Patch by:"
+# starts at the beginning of a line:
+#
+# Patch by: David Anderson <david.anderson@calixo.net>
+# <justin@erenkrantz.com>
+# me
+# (I wrote the regression tests.)
+# Found by: Phineas T. Phinder <phtph@ph1nderz.com>
+# Suggested by: Snosbig Q. Ptermione <sqptermione@example.com>
+# Review by: Justin Erenkrantz <justin@erenkrantz.com>
+# rooneg
+# (They caught an off-by-one error in the main loop.)
+#
+# This is a pathological example, but it shows all the things we might
+# need to parse. We need to:
+#
+# - Detect the officially-approved "WORD by: " fields.
+# - Grab every name (one per line) in each field.
+# - Handle names in various formats, unifying where possible.
+# - Expand "me" to the committer name for this revision.
+# - Associate a parenthetical aside following a field with that field.
+#
+# NOTES: You might be wondering, why not take 'svn log --xml' input?
+# Well, that would be the Right Thing to do, but in practice this was
+# a lot easier to whip up for straight 'svn log' output. I'd have no
+# objection to it being rewritten to take XML input.
+
+import os
+import sys
+import re
+import getopt
+try:
+ my_getopt = getopt.gnu_getopt
+except AttributeError:
+ my_getopt = getopt.getopt
+try:
+ # Python >=3.0
+ from urllib.parse import quote as urllib_parse_quote
+except ImportError:
+ # Python <3.0
+ from urllib import quote as urllib_parse_quote
+
+
# Warnings and errors start with these strings. They are typically
# followed by a colon and a space, as in "%s: " ==> "WARNING: ".
warning_prefix = 'WARNING'
error_prefix = 'ERROR'

def complain(msg, fatal=False):
  """Print MSG as a warning, or if FATAL is true, print it as an error
  and exit."""
  # Build the prefix from the shared constants above so the wording here
  # can never drift out of sync with other users of those prefixes.
  prefix = (error_prefix if fatal else warning_prefix) + ': '
  sys.stderr.write(prefix + msg + '\n')
  if fatal:
    sys.exit(1)
+
+
def html_spam_guard(addr, entities_only=False):
  """Return a spam-protected version of email ADDR that renders the
  same in HTML as the original address. If ENTITIES_ONLY, use a less
  thorough mangling scheme involving entities only, avoiding the use
  of tags."""
  # Pick the per-character template once, then apply it to every char.
  if entities_only:
    template = "&#%d;"
  else:
    template = "<span>&#%d;</span>"
  return "".join(template % ord(ch) for ch in addr)
+
+
def escape_html(str):
  """Return an HTML-escaped version of STR."""
  # '&' must be escaped first so the entities we introduce below are
  # not themselves re-escaped.
  escaped = str.replace('&', '&amp;')
  escaped = escaped.replace('<', '&lt;')
  return escaped.replace('>', '&gt;')
+
+
# Matches an already-escaped "&lt;addr@host&gt;" email inside HTML text.
_spam_guard_in_html_block_re = re.compile(r'&lt;([^&]*@[^&]*)&gt;')

def _spam_guard_in_html_block_func(m):
  # Re-wrap the mangled address in the same escaped angle brackets.
  return "&lt;%s&gt;" % html_spam_guard(m.group(1))

def spam_guard_in_html_block(str):
  """Take a block of HTML data, and run html_spam_guard() on parts of it."""
  return _spam_guard_in_html_block_re.sub(_spam_guard_in_html_block_func, str)
+
def html_header(title, page_heading=None, highlight_targets=False):
  """Return an HTML page header. TITLE and PAGE_HEADING are expected to
  already be HTML-escaped if needed. If HIGHLIGHT_TARGETS is true,
  include a style block that surrounds anchor targets with a red border
  when they are jumped to."""
  heading = page_heading or title
  pieces = [
    '<!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.0 Strict//EN"\n',
    ' "http://www.w3.org/TR/xhtml1/DTD/xhtml1-strict.dtd">\n',
    '<html><head>\n',
    '<meta http-equiv="Content-Type"',
    ' content="text/html; charset=UTF-8" />\n',
  ]
  if highlight_targets:
    pieces.append('<style type="text/css">\n')
    pieces.append(':target { border: 2px solid red; }\n')
    pieces.append('</style>\n')
  pieces.append('<title>%s</title>\n' % title)
  pieces.append('</head>\n\n')
  pieces.append('<body style="text-color: black; background-color: white">\n\n')
  pieces.append('<h1 style="text-align: center">%s</h1>\n\n' % heading)
  pieces.append('<hr />\n\n')
  return ''.join(pieces)
+
+
def html_footer():
  """Return the closing boilerplate for every generated HTML page."""
  return '\n</body>\n</html>\n'
+
+
class Contributor(object):
  """One person who contributed, registered under every name form we
  have seen for them (username, real name, email)."""

  # Map contributor names to contributor instances, so that there
  # exists exactly one instance associated with a given name.
  # Fold names with email addresses. That is, if we see someone
  # listed first with just an email address, but later with a real
  # name and that same email address together, we create only one
  # instance, and store it under both the email and the real name.
  all_contributors = { }

  def __init__(self, username, real_name, email):
    """Instantiate a contributor. Don't use this to generate a
    Contributor for an external caller, though, use .get() instead."""
    self.real_name = real_name
    self.username = username
    self.email = email
    self.is_committer = False       # Assume not until hear otherwise.
    self.is_full_committer = False  # Assume not until hear otherwise.
    # Map verbs (e.g., "Patch", "Suggested", "Review") to lists of
    # LogMessage objects. For example, the log messages stored under
    # "Patch" represent all the revisions for which this contributor
    # contributed a patch.
    self.activities = { }

  def add_activity(self, field_name, log):
    """Record that this contributor was active in FIELD_NAME in LOG."""
    logs = self.activities.get(field_name)
    if not logs:
      logs = [ ]
      self.activities[field_name] = logs
    if log not in logs:
      logs.append(log)

  @staticmethod
  def get(username, real_name, email):
    """If this contributor is already registered, just return it;
    otherwise, register it then return it. Hint: use parse() to
    generate the arguments."""
    c = None
    for key in username, real_name, email:
      if key and key in Contributor.all_contributors:
        c = Contributor.all_contributors[key]
        break
    # If we didn't get a Contributor, create one now.
    if not c:
      c = Contributor(username, real_name, email)
    # If we know identifying information that the Contributor lacks,
    # then give it to the Contributor now.
    if username:
      if not c.username:
        c.username = username
      Contributor.all_contributors[username] = c
    if real_name:
      if not c.real_name:
        c.real_name = real_name
      Contributor.all_contributors[real_name] = c
    if email:
      if not c.email:
        c.email = email
      Contributor.all_contributors[email] = c
    # This Contributor has never been in better shape; return it.
    return c

  def score(self):
    """Return a contribution score for this contributor."""
    # Right now we count a patch as 2, anything else as 1.
    score = 0
    for activity in self.activities.keys():
      if activity == 'Patch':
        score += len(self.activities[activity]) * 2
      else:
        score += len(self.activities[activity])
    return score

  def score_str(self):
    """Return a contribution score HTML string for this contributor."""
    patch_score = 0
    other_score = 0
    for activity in self.activities.keys():
      if activity == 'Patch':
        patch_score += len(self.activities[activity])
      else:
        other_score += len(self.activities[activity])
    if patch_score == 0:
      patch_str = ""
    elif patch_score == 1:
      patch_str = "1&nbsp;patch"
    else:
      patch_str = "%d&nbsp;patches" % patch_score
    if other_score == 0:
      other_str = ""
    elif other_score == 1:
      other_str = "1&nbsp;non-patch"
    else:
      other_str = "%d&nbsp;non-patches" % other_score
    if patch_str:
      if other_str:
        return ",&nbsp;".join((patch_str, other_str))
      else:
        return patch_str
    else:
      return other_str

  def __lt__(self, other):
    """Sort order: full committers after everyone else; within a group,
    descending score; ties broken by ascending big_name().

    Python 3 ignores __cmp__, so sorted() (used in drop()) requires
    this rich comparison method."""
    if self.is_full_committer and not other.is_full_committer:
      return False
    if other.is_full_committer and not self.is_full_committer:
      return True
    self_score = self.score()
    other_score = other.score()
    if self_score != other_score:
      # Higher scores sort first.
      return self_score > other_score
    return self.big_name() < other.big_name()

  def __cmp__(self, other):
    # Python 2 only: same ordering as __lt__ above. Harmless but
    # unused on Python 3 (where cmp() does not exist).
    if self.is_full_committer and not other.is_full_committer:
      return 1
    if other.is_full_committer and not self.is_full_committer:
      return -1
    result = cmp(self.score(), other.score())
    if result == 0:
      return cmp(self.big_name(), other.big_name())
    else:
      return 0 - result

  @staticmethod
  def parse(name):
    """Parse NAME, which can be

       - A committer username, or
       - A space-separated real name, or
       - A space-separated real name followed by an email address in
         angle brackets, or
       - Just an email address in angle brackets.

    (The email address may have '@' disguised as '{_AT_}'.)

    Return a tuple of (committer_username, real_name, email_address)
    any of which can be None if not available in NAME."""
    username = None
    real_name = None
    email = None
    name_components = name.split()
    if len(name_components) == 1:
      name = name_components[0] # Effectively, name = name.strip()
      if name[0] == '<' and name[-1] == '>':
        email = name[1:-1]
      elif name.find('@') != -1 or name.find('{_AT_}') != -1:
        email = name
      else:
        username = name
    elif name_components[-1][0] == '<' and name_components[-1][-1] == '>':
      real_name = ' '.join(name_components[0:-1])
      email = name_components[-1][1:-1]
    else:
      real_name = ' '.join(name_components)

    if email is not None:
      # We unobfuscate here and work with the '@' internally, since
      # we'll obfuscate it again (differently) before writing it out.
      email = email.replace('{_AT_}', '@')

    return username, real_name, email

  def canonical_name(self):
    """Return a canonical name for this contributor. The canonical
    name may or may not be based on the contributor's actual email
    address.

    The canonical name will not contain filename-unsafe characters.

    This method is guaranteed to return the same canonical name every
    time only if no further contributions are recorded from this
    contributor after the first call. This is because a contribution
    may bring a new form of the contributor's name, one which affects
    the algorithm used to construct canonical names."""
    retval = None
    if self.username:
      retval = self.username
    elif self.email:
      # Take some rudimentary steps to shorten the email address, to
      # make it more manageable. If this is ever discovered to result
      # in collisions, we can always just use to the full address.
      try:
        at_posn = self.email.index('@')
        first_dot_after_at = self.email.index('.', at_posn)
        retval = self.email[0:first_dot_after_at]
      except ValueError:
        retval = self.email
    elif self.real_name:
      # Last resort: construct canonical name based on real name.
      retval = ''.join(self.real_name.lower().split(' '))
    if retval is None:
      complain('Unable to construct a canonical name for Contributor.', True)
    return urllib_parse_quote(retval, safe="!#$&'()+,;<=>@[]^`{}~")

  def big_name(self, html=False, html_eo=False):
    """Return as complete a name as possible for this contributor.
    If HTML, then call html_spam_guard() on email addresses.
    If HTML_EO, then do the same, but specifying entities_only mode."""
    html = html or html_eo
    name_bits = []
    if self.real_name:
      if html:
        name_bits.append(escape_html(self.real_name))
      else:
        name_bits.append(self.real_name)
    if self.email:
      if not self.real_name and not self.username:
        name_bits.append(self.email)
      elif html:
        name_bits.append("&lt;%s&gt;" % html_spam_guard(self.email, html_eo))
      else:
        name_bits.append("<%s>" % self.email)
    if self.username:
      if not self.real_name and not self.email:
        name_bits.append(self.username)
      else:
        name_bits.append("(%s)" % self.username)
    return " ".join(name_bits)

  def __str__(self):
    s = 'CONTRIBUTOR: '
    s += self.big_name()
    s += "\ncanonical name: '%s'" % self.canonical_name()
    if len(self.activities) > 0:
      s += '\n '
    for activity in self.activities.keys():
      val = self.activities[activity]
      s += '[%s:' % activity
      for log in val:
        s += ' %s' % log.revision
      s += ']'
    return s

  def html_out(self, revision_url_pattern, filename):
    """Create an HTML file named FILENAME, showing all the revisions in which
    this contributor was active."""
    # 'with' guarantees the file is closed even if a write raises.
    with open(filename, 'w') as out:
      out.write(html_header(self.big_name(html_eo=True),
                            self.big_name(html=True), True))
      unique_logs = { }

      sorted_activities = sorted(self.activities.keys())

      out.write('<div class="h2" id="activities" title="activities">\n\n')
      out.write('<table border="1">\n')
      out.write('<tr>\n')
      for activity in sorted_activities:
        out.write('<td>%s</td>\n\n' % activity)
      out.write('</tr>\n')
      out.write('<tr>\n')
      for activity in sorted_activities:
        out.write('<td>\n')
        first_activity = True
        for log in self.activities[activity]:
          s = ',\n'
          if first_activity:
            s = ''
            first_activity = False
          out.write('%s<a href="#%s">%s</a>' % (s, log.revision, log.revision))
          unique_logs[log] = True
        out.write('</td>\n')
      out.write('</tr>\n')
      out.write('</table>\n\n')
      out.write('</div>\n\n')

      sorted_logs = sorted(unique_logs.keys())
      for log in sorted_logs:
        out.write('<hr />\n')
        out.write('<div class="h3" id="%s" title="%s">\n' % (log.revision,
                                                             log.revision))
        out.write('<pre>\n')
        if revision_url_pattern:
          revision_url = revision_url_pattern % log.revision[1:]
          revision = '<a href="%s">%s</a>' \
                     % (escape_html(revision_url), log.revision)
        else:
          revision = log.revision
        out.write('<b>%s | %s | %s</b>\n\n' % (revision,
                                               escape_html(log.committer),
                                               escape_html(log.date)))
        out.write(spam_guard_in_html_block(escape_html(log.message)))
        out.write('</pre>\n')
        out.write('</div>\n\n')
      out.write('<hr />\n')

      out.write(html_footer())
+
+
class Field:
  """One parsed "<Word> by:" field from a single log message."""

  def __init__(self, name, alias=None):
    # The canonical name of this field (e.g., "Patch", "Review", etc).
    self.name = name
    # An alternate spelling seen in the log (e.g., "Reviewed"), if any.
    self.alias = alias
    # Contributor objects, in the order they were encountered in the
    # field.
    self.contributors = []
    # Parenthesized aside(s) immediately following the field, with the
    # parentheses and trailing newline left on. In theory consecutive
    # asides concatenate; in practice the parser only ever records the
    # first one, since more than one is very uncommon and by then one
    # should probably be reading the full log message anyway.
    self.addendum = ''

  def add_contributor(self, contributor):
    self.contributors.append(contributor)

  def add_endum(self, addendum):
    self.addendum += addendum

  def __str__(self):
    parts = ['FIELD: %s (%d contributors)\n'
             % (self.name, len(self.contributors))]
    parts.extend(str(c) + '\n' for c in self.contributors)
    parts.append(self.addendum)
    return ''.join(parts)
+
+
class LogMessage(object):
  """One parsed 'svn log' entry, registered in a class-wide index."""

  # Maps revision strings (e.g., "r12345") onto LogMessage instances,
  # holding all the LogMessage instances ever created.
  all_logs = { }
  # Keep track of youngest rev.
  max_revnum = 0

  def __init__(self, revision, committer, date):
    """Instantiate a log message. All arguments are strings,
    including REVISION, which should retain its leading 'r'."""
    self.revision = revision
    self.committer = committer
    self.date = date
    self.message = ''
    # Map field names (e.g., "Patch", "Review", "Suggested") onto
    # Field objects.
    self.fields = { }
    if revision in LogMessage.all_logs:
      complain("Revision '%s' seen more than once" % revision, True)
    LogMessage.all_logs[revision] = self
    rev_as_number = int(revision[1:])
    if rev_as_number > LogMessage.max_revnum:
      LogMessage.max_revnum = rev_as_number

  def add_field(self, field):
    self.fields[field.name] = field

  def accum(self, line):
    """Accumulate one more line of raw message."""
    self.message += line

  def __lt__(self, other):
    """Order by *descending* revision number, so sorted() yields
    reverse-chronological output. Python 3 ignores __cmp__, so this
    method is required for the sorted() calls elsewhere in this
    script."""
    return int(self.revision[1:]) > int(other.revision[1:])

  def __cmp__(self, other):
    """Python 2 only: compare two log messages by revision number, for
    sort(). Return -1, 0 or 1 depending on whether a > b, a == b, or
    a < b. Note that this is reversed from normal sorting behavior,
    but it's what we want for reverse chronological ordering of
    revisions. (Python 3 uses __lt__ above instead.)"""
    a = int(self.revision[1:])
    b = int(other.revision[1:])
    if a > b: return -1
    if a < b: return 1
    else: return 0

  def __str__(self):
    s = '=' * 15
    header = ' LOG: %s | %s ' % (self.revision, self.committer)
    s += header
    s += '=' * 15
    s += '\n'
    for field_name in self.fields.keys():
      s += str(self.fields[field_name]) + '\n'
    s += '-' * 15
    s += '-' * len(header)
    s += '-' * 15
    s += '\n'
    return s
+
+
+
+### Code to parse the logs. ##
+
# 'svn log' delimits entries with a line of 72 dashes.
log_separator = '-' * 72 + '\n'
# Header line: "r123 | author | 2009-04-01 ... | 3 lines".
# Groups: revision (with 'r'), author, date, body line count.
# Raw strings keep the regex backslashes out of Python's own
# string-escape processing (non-raw '\s'/'\|' are deprecated escapes).
log_header_re = re.compile(
  r'^(r[0-9]+) \| ([^|]+) \| ([^|]+) \| ([0-9]+)[^0-9]')
# First line of a crediting field, e.g. "Patch by: ..." -- the text
# after the colon must contain at least one non-space character.
field_re = re.compile(
  r'^(Patch|Review(ed)?|Suggested|Found|Inspired|Tested|Reported) by:'
  r'\s*\S.*$')
# Alternate field spellings folded onto their canonical names.
field_aliases = {
  'Reviewed' : 'Review',
  'Reported' : 'Found',
}
# A whole line that is just a parenthesized aside.
parenthetical_aside_re = re.compile(r'^\s*\(.*\)\s*$')
+
def graze(input):
  """Read 'svn log' (or 'svn log -v') output from INPUT, a file-like
  object, and populate LogMessage.all_logs and
  Contributor.all_contributors with everything found.

  NOTE(review): this is a stateful, hand-rolled parser -- the order of
  the readline()/accum()/counter updates below is load-bearing, so only
  comments are added here.
  """
  just_saw_separator = False

  while True:
    line = input.readline()
    # EOF: readline() returns the empty string only at end of input.
    if line == '': break
    if line == log_separator:
      if just_saw_separator:
        sys.stderr.write('Two separators in a row.\n')
        sys.exit(1)
      else:
        just_saw_separator = True
      num_lines = None
      continue
    else:
      if just_saw_separator:
        # The line right after a separator must be a revision header.
        m = log_header_re.match(line)
        if not m:
          sys.stderr.write('Could not match log message header.\n')
          sys.stderr.write('Line was:\n')
          sys.stderr.write("'%s'\n" % line)
          sys.exit(1)
        else:
          log = LogMessage(m.group(1), m.group(2), m.group(3))
          # Number of body lines, from the "N lines" header field.
          num_lines = int(m.group(4))
          just_saw_separator = False
          saw_patch = False
          line = input.readline()
          # Handle 'svn log -v' by waiting for the blank line.
          while line != '\n':
            line = input.readline()
          # Parse the log message.
          field = None
          while num_lines > 0:
            line = input.readline()
            log.accum(line)
            m = field_re.match(line)
            if m:
              # We're on the first line of a field. Parse the field.
              while m:
                if not field:
                  ident = m.group(1)
                  # Fold alternate spellings (e.g. "Reviewed") onto the
                  # canonical field name, remembering the alias.
                  if ident in field_aliases:
                    field = Field(field_aliases[ident], ident)
                  else:
                    field = Field(ident)
                # Each line begins either with "WORD by:", or with whitespace.
                in_field_re = re.compile('^('
                                         + (field.alias or field.name)
                                         + ' by:\s+|\s+)([^\s(].*)')
                m = in_field_re.match(line)
                if m is None:
                  sys.stderr.write("Error matching: %s\n" % (line))
                user, real, email = Contributor.parse(m.group(2))
                # "me" is shorthand for this revision's committer.
                if user == 'me':
                  user = log.committer
                c = Contributor.get(user, real, email)
                c.add_activity(field.name, log)
                if (field.name == 'Patch'):
                  saw_patch = True
                field.add_contributor(c)
                line = input.readline()
                if line == log_separator:
                  # If the log message doesn't end with its own
                  # newline (that is, there's the newline added by the
                  # svn client, but no further newline), then just move
                  # on to the next log entry.
                  just_saw_separator = True
                  num_lines = 0
                  break
                log.accum(line)
                num_lines -= 1
                # A continuation line keeps the field going; a fresh
                # "WORD by:" line starts the next field; otherwise the
                # field is finished (possibly with a trailing aside).
                m = in_field_re.match(line)
                if not m:
                  m = field_re.match(line)
                  if not m:
                    aside_match = parenthetical_aside_re.match(line)
                    if aside_match:
                      field.add_endum(line)
              log.add_field(field)
              field = None
            num_lines -= 1
          # A commit with no "Patch by:" field is implicitly a patch by
          # its committer (unless the committer is unknown).
          if not saw_patch and log.committer != '(no author)':
            c = Contributor.get(log.committer, None, None)
            c.add_activity('Patch', log)
          continue
+
# HTML boilerplate written into index.html between the page heading and
# the contributor list (see drop()).
index_introduction = '''
<p>The following list of contributors and their contributions is meant
to help us keep track of whom to consider for commit access. The list
was generated from "svn&nbsp;log" output by <a
href="http://svn.apache.org/repos/asf/subversion/trunk/tools/dev/contribulyze.py"
>contribulyze.py</a>, which looks for log messages that use the <a
href="http://subversion.apache.org/docs/community-guide/conventions.html#crediting"
>special contribution format</a>.</p>

<p><i>Please do not use this list as a generic guide to who has
contributed what to Subversion!</i> It omits existing <a
href="http://svn.apache.org/repos/asf/subversion/trunk/COMMITTERS"
>full committers</a>, for example, because they are irrelevant to our
search for new committers. Also, it merely counts changes, it does
not evaluate them. To truly understand what someone has contributed,
you have to read their changes in detail. This page can only assist
human judgement, not substitute for it.</p>

'''
+
def drop(revision_url_pattern):
  """Write the report: one HTML page per eligible contributor in the
  "detail" subdirectory (created if needed), plus an index.html in the
  current directory linking to each of them. REVISION_URL_PATTERN, if
  not None, is a '%s' interpolation pattern used to turn revision
  numbers into links."""
  # The data structures are all linked up nicely to one another. You
  # can get all the LogMessages, and each LogMessage contains all the
  # Contributors involved with that commit; likewise, each Contributor
  # points back to all the LogMessages it contributed to.
  #
  # However, the HTML output is pretty simple right now. It's not take
  # full advantage of all that cross-linking. For each contributor, we
  # just create a file listing all the revisions contributed to; and we
  # build a master index of all contributors, each name being a link to
  # that contributor's individual file. Much more is possible... but
  # let's just get this up and running first.

  detail_subdir = "detail"
  if not os.path.exists(detail_subdir):
    os.mkdir(detail_subdir)

  with open('index.html', 'w') as index:
    index.write(html_header('Contributors as of r%d' % LogMessage.max_revnum))
    index.write(index_introduction)
    index.write('<ol>\n')
    # The same contributor appears under multiple keys, so uniquify.
    seen_contributors = { }
    # Sorting alphabetically is acceptable, but even better would be to
    # sort by number of contributions, so the most active people appear at
    # the top -- that way we know whom to look at first for commit access
    # proposals.
    sorted_contributors = sorted(Contributor.all_contributors.values())
    for c in sorted_contributors:
      if c not in seen_contributors:
        if c.score() > 0:
          if c.is_full_committer:
            # Don't even bother to print out full committers. They are
            # a distraction from the purposes for which we're here.
            continue
          else:
            committerness = ''
            if c.is_committer:
              committerness = '&nbsp;(partial&nbsp;committer)'
            urlpath = "%s/%s.html" % (detail_subdir, c.canonical_name())
            fname = os.path.join(detail_subdir, "%s.html" % c.canonical_name())
            index.write('<li><p><a href="%s">%s</a>&nbsp;[%s]%s</p></li>\n'
                        % (urllib_parse_quote(urlpath),
                           c.big_name(html=True),
                           c.score_str(), committerness))
            c.html_out(revision_url_pattern, fname)
        seen_contributors[c] = True
    index.write('</ol>\n')
    index.write(html_footer())
+
+
def process_committers(committers):
  """Read from open file handle COMMITTERS, which should be in
  the same format as the Subversion 'COMMITTERS' file. Create
  Contributor objects based on the contents."""
  # Skip the preamble up to the start of the full-committers section.
  line = committers.readline()
  while line != 'Blanket commit access:\n':
    line = committers.readline()
  in_full_committers = True
  # "username   Real Name   (optional parenthetical)" -- raw string so
  # the backslashes reach the regex engine untouched.
  matcher = re.compile(r'(\S+)\s+([^\(\)]+)\s+(\([^()]+\)){0,1}')
  line = committers.readline()
  while line:
    # Every @-sign we see after this point indicates a committer line...
    if line == 'Commit access for specific areas:\n':
      in_full_committers = False
    # ...except in the "dormant committers" area, which comes last anyway.
    if line == 'Committers who have asked to be listed as dormant:\n':
      in_full_committers = True
    elif line.find('@') >= 0:
      line = line.lstrip()
      m = matcher.match(line)
      if m is None:
        # An @-sign on a line we cannot parse: warn and keep going
        # rather than dying with an AttributeError below.
        complain("Could not parse COMMITTERS line: %s" % line)
      else:
        user = m.group(1)
        real_and_email = m.group(2).strip()
        ignored, real, email = Contributor.parse(real_and_email)
        c = Contributor.get(user, real, email)
        c.is_committer = True
        c.is_full_committer = in_full_committers
    line = committers.readline()
+
+
def usage():
  """Print this script's usage/help text to stdout."""
  script = os.path.basename(sys.argv[0])
  help_lines = [
    'USAGE: %s [-C COMMITTERS_FILE] < SVN_LOG_OR_LOG-V_OUTPUT' % script,
    '',
    'Create HTML files in the current directory, rooted at index.html,',
    'in which you can browse to see who contributed what.',
    '',
    'The log input should use the contribution-tracking format defined',
    'in http://subversion.apache.org/docs/community-guide/conventions.html#crediting.',
    '',
    'Options:',
    '',
    ' -h, -H, -?, --help Print this usage message and exit',
    ' -C FILE Use FILE as the COMMITTERS file',
    ' -U URL Use URL as a Python interpolation pattern to',
    ' generate URLs to link revisions to some kind',
    ' of web-based viewer (e.g. ViewCVS). The',
    ' interpolation pattern should contain exactly',
    ' one format specifier, \'%s\', which will be',
    ' replaced with the revision number.',
    '',
  ]
  for help_line in help_lines:
    print(help_line)
+
+
def main():
  """Parse the command line, slurp 'svn log' data from stdin, and emit
  the HTML report into the current directory."""
  try:
    opts, args = my_getopt(sys.argv[1:], 'C:U:hH?', ['help'])
  except getopt.GetoptError as err:
    complain(str(err) + '\n\n')
    usage()
    sys.exit(1)

  # Parse options.
  revision_url_pattern = None
  for flag, arg in opts:
    if flag in ('--help', '-h', '-H', '-?'):
      usage()
      sys.exit(0)
    elif flag == '-C':
      process_committers(open(arg))
    elif flag == '-U':
      revision_url_pattern = arg

  # Gather the data...
  graze(sys.stdin)
  # ...then write out the report.
  drop(revision_url_pattern)

if __name__ == '__main__':
  main()
diff --git a/tools/dev/datecheck.py b/tools/dev/datecheck.py
new file mode 100755
index 0000000..c5b4caf
--- /dev/null
+++ b/tools/dev/datecheck.py
@@ -0,0 +1,102 @@
+#!/usr/bin/env python
+#
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements. See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership. The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied. See the License for the
+# specific language governing permissions and limitations
+# under the License.
+#
+#
+
+### This is a debugging script to test date-ordering in an SVN repository.
+
+'''Tell which revisions are out of order w.r.t. date in a repository.
+Takes "svn log -q -r1:HEAD" output, prints results like this:
+
+ $ svn log -q -r1:HEAD | ./datecheck.py
+ [...]
+ r42 OK 2003-06-02 22:20:31 -0500
+ r43 OK 2003-06-02 22:20:31 -0500
+ r44 OK 2003-06-02 23:29:14 -0500
+ r45 OK 2003-06-02 23:29:14 -0500
+ r46 OK 2003-06-02 23:33:13 -0500
+ r47 OK 2003-06-10 15:19:47 -0500
+ r48 NOT OK 2003-06-02 23:33:13 -0500
+ r49 OK 2003-06-10 15:19:48 -0500
+ r50 NOT OK 2003-06-02 23:33:13 -0500
+ [...]
+'''
+
+import sys
+import time
+
# "svn log -q" delimits each entry with a line of 72 dashes.
log_msg_separator = "-" * 72 + "\n"

line = sys.stdin.readline()
last_date = 0
# The loop condition stops at EOF, where readline() returns '', so the
# old redundant "if not line: break" inside the body has been removed.
while line:

  if line == log_msg_separator:
    line = sys.stdin.readline()
    continue

  # We're looking at a revision line like this:
  #
  # "r1 | svn | 2001-08-30 23:24:14 -0500 (Thu, 30 Aug 2001)"
  #
  # Parse out the revision and the date field.

  rev, ignored, date_full = line.split("|")
  rev = rev.strip()
  date_full = date_full.strip()

  # We only need the machine-readable portion of the date, so ignore
  # the parenthesized part on the end, which is meant for humans.

  # Get the "2004-06-02 00:15:08" part of "2004-06-02 00:15:08 -0500".
  date = date_full[0:19]
  # Get the "-0500" part of "2004-06-02 00:15:08 -0500".
  offset = date_full[20:25]

  # Parse the offset by hand and adjust the date accordingly, because
  # http://docs.python.org/lib/module-time.html doesn't seem to offer
  # a standard way to parse "-0500", "-0600", etc, suffixes.  Arggh.
  offset_sign = offset[0:1]
  offset_hours = int(offset[1:3])
  offset_minutes = int(offset[3:5])

  # Get a first draft of the date...
  # NOTE(review): time.mktime() interprets the tuple in *local* time, so
  # the absolute epoch values are skewed by the local UTC offset; the
  # relative ordering this script reports stays consistent as long as
  # that offset doesn't change mid-log (e.g. across a DST boundary) --
  # confirm if exactness ever matters.
  date_as_int = time.mktime(time.strptime(date, "%Y-%m-%d %H:%M:%S"))
  # ... but it's still not correct, we must adjust for the offset.
  if offset_sign == "-":
    date_as_int -= (offset_hours * 3600)
    date_as_int -= (offset_minutes * 60)
  elif offset_sign == "+":
    date_as_int += (offset_hours * 3600)
    date_as_int += (offset_minutes * 60)
  else:
    sys.stderr.write("Error: unknown offset sign '%s'.\n" % offset_sign)
    sys.exit(1)

  # A revision is "NOT OK" when it is dated earlier than the one before it.
  ok_not_ok = " OK"
  if last_date > date_as_int:
    ok_not_ok = "NOT OK"

  print("%-8s %s %s %s" % (rev, ok_not_ok, date, offset))
  last_date = date_as_int
  line = sys.stdin.readline()
diff --git a/tools/dev/find-bad-style.py b/tools/dev/find-bad-style.py
new file mode 100755
index 0000000..537cc3d
--- /dev/null
+++ b/tools/dev/find-bad-style.py
@@ -0,0 +1,57 @@
+#!/usr/bin/python
+#
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements. See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership. The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied. See the License for the
+# specific language governing permissions and limitations
+# under the License.
+#
+#
+#
+# Find places in our code where whitespace is erroneously used before
+# the open-paren on a function all. This is typically manifested like:
+#
+# return svn_some_function
+# (param1, param2, param3)
+#
+#
+# USAGE: find-bad-style.py FILE1 FILE2 ...
+#
+
+import sys
+import re
+
# Matches a line that begins (apart from whitespace) with an open paren,
# i.e. an argument list pushed onto its own line.
re_call = re.compile(r'^\s*\(')
# Matches a line whose last token looks like an identifier, i.e. a
# probable function name.
re_func = re.compile(r'.*[a-z0-9_]{1,}\s*$')


def scan_file(fname):
  """Print 'FNAME:LINENO:LINE' for each line of FNAME that ends in a
  function name whose argument list starts on the following line
  (i.e. whitespace before the open paren)."""
  # Close the file deterministically instead of leaking the handle.
  with open(fname) as f:
    lines = f.readlines()

  prev = None
  for line_num, line in enumerate(lines, start=1):
    if re_call.match(line):
      if prev and re_func.match(prev):
        # Report the *previous* line: that is where the call begins.
        print('%s:%d:%s' % (fname, line_num - 1, prev.rstrip()))

    prev = line
+
+
if __name__ == '__main__':
  # Scan every file named on the command line.
  for fname in sys.argv[1:]:
    scan_file(fname)
diff --git a/tools/dev/find-control-statements.py b/tools/dev/find-control-statements.py
new file mode 100755
index 0000000..1c6c3b2
--- /dev/null
+++ b/tools/dev/find-control-statements.py
@@ -0,0 +1,178 @@
+#!/usr/bin/python
+#
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements. See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership. The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied. See the License for the
+# specific language governing permissions and limitations
+# under the License.
+#
+#
+#
+# Find places in our code that are part of control statements
+# i.e. "for", "if" and "while". That output is then easily
+# searched for various interesting / complex pattern.
+#
+#
+# USAGE: find-control-statements.py FILE1 FILE2 ...
+#
+
+import sys
+
# Per-file output state: whether the current file's banner has been
# printed yet, and the number of the last line printed (used to omit the
# line number on consecutive lines).  Reset by the __main__ driver.
header_shown = False
last_line_num = None

def print_line(fname, line_num, line):
  """ Print LINE of number LINE_NUM in file FNAME.
      Show FNAME only once per file and LINE_NUM only for
      non-consecutive lines.
  """
  global header_shown
  global last_line_num

  if not header_shown:
    print('')
    print(fname)
    header_shown = True

  # LINE still carries its own trailing newline, so suppress print()'s.
  # (The original used a Python 2 trailing-comma 'print(...),'; with the
  # print() *function* that comma builds a throwaway tuple and the extra
  # newline is emitted anyway, double-spacing the output.)
  if last_line_num and (last_line_num + 1 == line_num):
    print(" %s" % line, end='')
  else:
    print('%5d:%s' % (line_num, line), end='')

  last_line_num = line_num
+
def is_control(line, index, word):
  """ Decide whether the occurrence of WORD at LINE[INDEX] really is a
      control statement: it must not be glued to a preceding token, and
      nothing but blanks may stand between WORD and its opening '('.
  """
  # Unless the keyword starts the line, it must be preceded by
  # whitespace or a ';'.
  if index > 0 and line[index - 1] not in (' ', '\t', ';'):
    return False

  after_word = index + len(word)
  paren_pos = line.find('(', after_word)
  if paren_pos == -1:
    return False

  # Only blanks may separate the keyword from the '('.
  return all(c in (' ', '\t') for c in line[after_word:paren_pos])
+
def find_specific_control(line, control):
  """ Return the offset of the first occurrence of CONTROL in LINE that
      is a real control statement (per is_control), or -1 if none is.
  """
  pos = line.find(control)
  while pos != -1:
    if is_control(line, pos, control):
      return pos

    # Not a control statement here; resume the search after this match.
    pos = line.find(control, pos + len(control))

  return -1
+
def find_control(line):
  """ Return the offset of the first control statement ("for", "if" or
      "while") in LINE, or -1 if there is none.
  """
  # (The unused local 'current' from the original has been dropped.)
  positions = [find_specific_control(line, keyword)
               for keyword in ("for", "if", "while")]
  hits = [pos for pos in positions if pos >= 0]

  # Report the leftmost hit, if any.
  return min(hits) if hits else -1
+
+def parantheses_delta(line):
+ """ Return the number of opening minus the number of closing
+ parantheses in LINE. Don't count those inside strings or chars.
+ """
+ escaped = False
+ in_squote = False
+ in_dquote = False
+
+ delta = 0
+
+ for c in line:
+ if escaped:
+ escaped = False
+
+ elif in_dquote:
+ if c == '\\':
+ escaped = True
+ elif c == '"':
+ in_dquote = False
+
+ elif in_squote:
+ if c == '\\':
+ escaped = True
+ elif c == "'":
+ in_squote = False
+
+ elif c == '(':
+ delta += 1
+ elif c == ')':
+ delta -= 1
+ elif c == '"':
+ in_dquote = True
+ elif c == "'":
+ in_squote -= True
+
+ return delta
+
def scan_file(fname):
  """ Print every line of FNAME that is part of a control statement:
      the line the statement starts on plus all following lines up to
      (and including) the one that balances its parentheses.
  """
  # Close the file deterministically instead of leaking the handle.
  with open(fname) as f:
    lines = f.readlines()

  line_num = 1
  parantheses_level = 0

  for line in lines:

    # While inside an unbalanced control statement, keep printing from
    # the start of each line; otherwise look for a new control keyword.
    if parantheses_level > 0:
      index = 0
    else:
      index = find_control(line)

    if index >= 0:
      print_line(fname, line_num, line)
      parantheses_level += parantheses_delta(line[index:])

    line_num += 1
+
if __name__ == '__main__':
  for fname in sys.argv[1:]:
    # Reset the per-file output state before scanning each file.
    header_shown = False
    last_line_num = None
    scan_file(fname)
diff --git a/tools/dev/find-unmoved-deprecated.sh b/tools/dev/find-unmoved-deprecated.sh
new file mode 100755
index 0000000..c689853
--- /dev/null
+++ b/tools/dev/find-unmoved-deprecated.sh
@@ -0,0 +1,36 @@
+#!/bin/sh
+#
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements. See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership. The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied. See the License for the
+# specific language governing permissions and limitations
+# under the License.
+#
+#
+#
+# Find functions marked a SVN_DEPRECATED, but which have not been moved
+# to their associated deprecated.c file.
+#
+# Run this from within the subversion/include/ directory.
+#
+
# Collect the names of all functions declared SVN_DEPRECATED in the
# public headers: fgrep -A 2 pulls in the two lines following the marker,
# where the declaration starts the line; sed keeps the name up to '('.
deprecated="`cat svn_*.h | fgrep -A 2 SVN_DEPRECATED | sed -n '/^svn_/s/(.*//p'`"
for func in $deprecated ; do
  # Report any deprecated function with no implementation in one of the
  # deprecated.c files.  ('!' replaces the original's hard-coded
  # /usr/bin/true no-op branch, which is not portable everywhere.)
  if ! grep -q "${func}(" ../*/deprecated.c ; then
    echo $func was not found
  fi
done
diff --git a/tools/dev/fsfs-access-map.c b/tools/dev/fsfs-access-map.c
new file mode 100644
index 0000000..7f670ee
--- /dev/null
+++ b/tools/dev/fsfs-access-map.c
@@ -0,0 +1,794 @@
+/* fsfs-access-map.c -- convert strace output into FSFS access bitmap
+ *
+ * ====================================================================
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ * ====================================================================
+ */
+
+#include "svn_pools.h"
+#include "svn_string.h"
+#include "svn_io.h"
+
+#include "private/svn_string_private.h"
+
/* The information we gather for each file. There will be one instance
 * per file name - even if the file got deleted and re-created.
 */
typedef struct file_stats_t
{
  /* file name as found in the open() call */
  const char *name;

  /* file size as determined during the tool run. Will be 0 for
   * files that no longer exist. However, there may still be entries
   * in the read_map. */
  apr_int64_t size;

  /* for rev files (packed or non-packed), this will be the first revision
   * that file. -1 for non-rev files. */
  apr_int64_t rev_num;

  /* number of times this file got opened */
  apr_int64_t open_count;

  /* number of lseek counts */
  apr_int64_t seek_count;

  /* number of lseek calls to clusters not previously read */
  apr_int64_t uncached_seek_count;

  /* number of lseek counts not followed by a read */
  apr_int64_t unnecessary_seeks;

  /* number of read() calls */
  apr_int64_t read_count;

  /* number of read() calls that returned 0 bytes */
  apr_int64_t empty_reads;

  /* total number of bytes returned by those reads */
  apr_int64_t read_size;

  /* number of clusters read */
  apr_int64_t clusters_read;

  /* number of different clusters read
   * (i.e. number of non-zero entries in read_map). */
  apr_int64_t unique_clusters_read;

  /* cluster -> read count mapping (1 word per cluster, saturated at 64k) */
  apr_array_header_t *read_map;

} file_stats_t;

/* Represents an open file handle. It refers to a file and concatenates
 * consecutive reads such that we don't artificially hit the same cluster
 * multiple times. Instances of this type will be reused to limit the
 * allocation load on the lookup map.
 */
typedef struct handle_info_t
{
  /* the open file */
  file_stats_t *file;

  /* file offset at which the current series of reads started (default: 0) */
  apr_int64_t last_read_start;

  /* bytes read so far in the current series of reads started (default: 0) */
  apr_int64_t last_read_size;

  /* number of read() calls in this series */
  apr_int64_t read_count;
} handle_info_t;

/* useful typedef */
typedef unsigned char byte;
/* 16-bit per-cluster read counter; saturated at 0xffff by the updaters */
typedef unsigned short word;

/* an RGB color (stored in BMP's B-G-R byte order, see select_color) */
typedef byte color_t[3];

/* global const char * file name -> *file_info_t map */
static apr_hash_t *files = NULL;

/* global int handle -> *handle_info_t map. Entries don't get removed
 * by close(). Instead, we simply recycle (and re-initilize) existing
 * instances. */
static apr_hash_t *handles = NULL;

/* assume cluster size. 64 and 128kB are typical values for RAIDs. */
static apr_int64_t cluster_size = 64 * 1024;
+
/* Call this after a sequence of reads has been ended by either close()
 * or lseek() for this HANDLE_INFO. This will update the read_map and
 * unique_clusters_read members of the underlying file_info_t structure.
 */
static void
store_read_info(handle_info_t *handle_info)
{
  if (handle_info->last_read_size)
    {
      apr_size_t i;
      apr_size_t first_cluster
         = (apr_size_t)(handle_info->last_read_start / cluster_size);
      apr_size_t last_cluster
         = (apr_size_t)((  handle_info->last_read_start
                         + handle_info->last_read_size
                         - 1) / cluster_size);

      /* auto-expand access map in case the file later shrunk or got deleted */
      while (handle_info->file->read_map->nelts <= last_cluster)
        APR_ARRAY_PUSH(handle_info->file->read_map, word) = 0;

      /* accumulate the accesses per cluster. Saturate and count first
       * (i.e. disjoint) accesses clusters */
      handle_info->file->clusters_read += last_cluster - first_cluster + 1;
      for (i = first_cluster; i <= last_cluster; ++i)
        {
          /* per-cluster counter; first touch counts as a unique cluster,
           * and the counter saturates at 0xffff to stay within a word */
          word *count = &APR_ARRAY_IDX(handle_info->file->read_map, i, word);
          if (*count == 0)
            handle_info->file->unique_clusters_read++;
          if (*count < 0xffff)
            ++*count;
        }
    }
  else if (handle_info->read_count == 0)
    {
      /* two consecutive seeks: nothing was read from the position the
       * earlier seek established, so that seek was unnecessary */
      handle_info->file->unnecessary_seeks++;
    }
}
+
+/* Handle a open() call. Ensures that a file_info_t for the given NAME
+ * exists. Auto-create and initialize a handle_info_t for it linked to
+ * HANDLE.
+ */
+static void
+open_file(const char *name, int handle)
+{
+ file_stats_t *file = apr_hash_get(files, name, APR_HASH_KEY_STRING);
+ handle_info_t *handle_info = apr_hash_get(handles, &handle, sizeof(handle));
+
+ /* auto-create file info */
+ if (!file)
+ {
+ apr_pool_t *pool = apr_hash_pool_get(files);
+ apr_pool_t *subpool = svn_pool_create(pool);
+
+ apr_file_t *apr_file = NULL;
+ apr_finfo_t finfo = { 0 };
+ int cluster_count = 0;
+
+ /* determine file size (if file still exists) */
+ apr_file_open(&apr_file, name,
+ APR_READ | APR_BUFFERED, APR_OS_DEFAULT, subpool);
+ if (apr_file)
+ apr_file_info_get(&finfo, APR_FINFO_SIZE, apr_file);
+ svn_pool_destroy(subpool);
+
+ file = apr_pcalloc(pool, sizeof(*file));
+ file->name = apr_pstrdup(pool, name);
+ file->size = finfo.size;
+
+ /* pre-allocate cluster map accordingly
+ * (will be auto-expanded later if necessary) */
+ cluster_count = (int)(1 + (file->size - 1) / cluster_size);
+ file->read_map = apr_array_make(pool, file->size
+ ? cluster_count
+ : 1, sizeof(word));
+
+ while (file->read_map->nelts < cluster_count)
+ APR_ARRAY_PUSH(file->read_map, byte) = 0;
+
+ /* determine first revision of rev / packed rev files */
+ if (strstr(name, "/db/revs/") != NULL && strstr(name, "manifest") == NULL)
+ if (strstr(name, ".pack/pack") != NULL)
+ file->rev_num = SVN_STR_TO_REV(strstr(name, "/db/revs/") + 9);
+ else
+ file->rev_num = SVN_STR_TO_REV(strrchr(name, '/') + 1);
+ else
+ file->rev_num = -1;
+
+ /* filter out log/phys index files */
+ if (file->rev_num >= 0)
+ {
+ const char *suffix = name + strlen(name) - 4;
+ if (strcmp(suffix, ".l2p") == 0 || strcmp(suffix, ".p2l") == 0)
+ file->rev_num = -1;
+ }
+
+ apr_hash_set(files, file->name, APR_HASH_KEY_STRING, file);
+ }
+
+ file->open_count++;
+
+ /* auto-create handle instance */
+ if (!handle_info)
+ {
+ apr_pool_t *pool = apr_hash_pool_get(handles);
+ int *key = apr_palloc(pool, sizeof(*key));
+ *key = handle;
+
+ handle_info = apr_pcalloc(pool, sizeof(*handle_info));
+ apr_hash_set(handles, key, sizeof(*key), handle_info);
+ }
+
+ /* link handle to file */
+ handle_info->file = file;
+ handle_info->last_read_start = 0;
+ handle_info->last_read_size = 0;
+}
+
+/* COUNT bytes have been read from file with the given HANDLE.
+ */
+static void
+read_file(int handle, apr_int64_t count)
+{
+ handle_info_t *handle_info = apr_hash_get(handles, &handle, sizeof(handle));
+ if (handle_info)
+ {
+ /* known file handle -> expand current read sequence */
+
+ handle_info->read_count++;
+ handle_info->last_read_size += count;
+ handle_info->file->read_count++;
+ handle_info->file->read_size += count;
+
+ if (count == 0)
+ handle_info->file->empty_reads++;
+ }
+}
+
/* Seek to offset LOCATION in file given by HANDLE.
 * Handles we have not seen an open() for are silently ignored.
 */
static void
seek_file(int handle, apr_int64_t location)
{
  handle_info_t *handle_info = apr_hash_get(handles, &handle, sizeof(handle));
  if (handle_info)
    {
      /* known file handle -> end current read sequence and start a new one */

      apr_size_t cluster = (apr_size_t)(location / cluster_size);

      /* commit the read sequence that this seek terminates */
      store_read_info(handle_info);

      handle_info->last_read_size = 0;
      handle_info->last_read_start = location;
      handle_info->read_count = 0;
      handle_info->file->seek_count++;

      /* if we seek to a location that had not been read from before,
       * there will probably be a real I/O seek on the following read.
       */
      if (   handle_info->file->read_map->nelts <= cluster
          || APR_ARRAY_IDX(handle_info->file->read_map, cluster, word) == 0)
        handle_info->file->uncached_seek_count++;
    }
}
+
+/* The given file HANDLE has been closed.
+ */
+static void
+close_file(int handle)
+{
+ /* for known file handles, end current read sequence */
+
+ handle_info_t *handle_info = apr_hash_get(handles, &handle, sizeof(handle));
+ if (handle_info)
+ store_read_info(handle_info);
+}
+
/* Parse / process non-empty the LINE from an strace output.
 * The buffer is modified in place: NUL bytes are punched into it to
 * isolate the function name and its first parameter.
 */
static void
parse_line(svn_stringbuf_t *line)
{
  /* determine function name, first parameter and return value */
  char *func_end = strchr(line->data, '(');
  char *return_value = strrchr(line->data, ' ');
  char *first_param_end;
  apr_int64_t func_return = 0;
  char *func_start = strchr(line->data, ' ');

  if (func_end == NULL || return_value == NULL)
    return;

  /* skip any blank-separated prefix column before the function name
   * (e.g. a PID column - TODO confirm against actual strace -f output) */
  if (func_start == NULL || func_start > func_end)
    func_start = line->data;
  else
    while(*func_start == ' ')
      func_start++;

  first_param_end = strchr(func_end, ',');
  if (first_param_end == NULL)
    first_param_end = strchr(func_end, ')');

  if (first_param_end == NULL)
    return;

  /* terminate the function name and the first parameter in place */
  *func_end++ = 0;
  *first_param_end = 0;
  ++return_value;

  /* (try to) convert the return value into an integer.
   * If that fails, continue anyway as defaulting to 0 will be safe for us. */
  svn_error_clear(svn_cstring_atoi64(&func_return, return_value));

  /* process those operations that we care about */
  if (strcmp(func_start, "open") == 0)
    {
      /* remove double quotes from file name parameter */
      *func_end++ = 0;
      *--first_param_end = 0;

      open_file(func_end, (int)func_return);
    }
  else if (strcmp(func_start, "read") == 0)
    read_file(atoi(func_end), func_return);
  else if (strcmp(func_start, "lseek") == 0)
    seek_file(atoi(func_end), func_return);
  else if (strcmp(func_start, "close") == 0)
    close_file(atoi(func_end));
}
+
/* Process the strace output stored in FILE, line by line, until EOF,
 * a read error, or an empty line is encountered.
 */
static void
parse_file(apr_file_t *file)
{
  apr_pool_t *pool = svn_pool_create(NULL);
  apr_pool_t *iterpool = svn_pool_create(pool);

  /* limit lines to 4k (usually, we need less than 200 bytes) */
  svn_stringbuf_t *line = svn_stringbuf_create_ensure(4096, pool);

  do
    {
      svn_error_t *err = NULL;

      line->len = line->blocksize-1;
      err = svn_io_read_length_line(file, line->data, &line->len, iterpool);
      /* the error object only serves as an EOF/failure flag here, so
       * release it immediately and merely test the pointer afterwards */
      svn_error_clear(err);
      if (err)
        break;

      parse_line(line);
      svn_pool_clear(iterpool);
    }
  /* an empty line also terminates the parse */
  while (line->len > 0);
}
+
+/* qsort() callback. Sort files by revision number.
+ */
+static int
+compare_files(file_stats_t **lhs, file_stats_t **rhs)
+{
+ return (*lhs)->rev_num < (*rhs)->rev_num;
+}
+
/* Return all rev (and packed rev) files sorted by revision number.
 * Allocate the result in POOL.
 */
static apr_array_header_t *
get_rev_files(apr_pool_t *pool)
{
  apr_hash_index_t *hi;
  apr_array_header_t *result = apr_array_make(pool,
                                              apr_hash_count(files),
                                              sizeof(file_stats_t *));

  /* select all files that have a rev number (rev_num < 0 marks
   * non-rev files, see open_file) */
  for (hi = apr_hash_first(pool, files); hi; hi = apr_hash_next(hi))
    {
      const char *name = NULL;
      apr_ssize_t len = 0;
      file_stats_t *file = NULL;

      apr_hash_this(hi, (const void **)&name, &len, (void**)&file);
      if (file->rev_num >= 0)
        APR_ARRAY_PUSH(result, file_stats_t *) = file;
    }

  /* sort them (ordering defined by compare_files) */
  qsort(result->elts, result->nelts, result->elt_size,
        (int (*)(const void *, const void *))compare_files);

  /* return the result */
  return result;
}
+
+/* store VALUE to DEST in little-endian format. Assume that the target
+ * buffer is filled with 0.
+ */
+static void
+write_number(byte *dest, int value)
+{
+ while (value)
+ {
+ *dest = (byte)(value % 256);
+ value /= 256;
+ ++dest;
+ }
+}
+
/* Return a linearly interpolated y value for X with X0 <= X <= X1 and
 * the corresponding Y0 and Y1 values.
 */
static int
interpolate(int y0, int x0, int y1, int x1, int x)
{
  int delta_y = y1 - y0;
  int delta_x = x1 - x0;

  /* integer interpolation; the division truncates toward zero */
  return y0 + (delta_y * (x - x0)) / delta_x;
}
+
/* Return the BMP-encoded 24 bit COLOR for the given value.
 */
static void
select_color(byte color[3], word value)
{
  enum { COLOR_COUNT = 10 };

  /* value -> color table. Missing values get interpolated.
   * { count, B - G - R } */
  word table[COLOR_COUNT][4] =
    {
      { 0, 255, 255, 255 }, /* unread -> white */
      { 1, 64, 128, 0 }, /* read once -> turquoise */
      { 2, 0, 128, 0 }, /* twice -> green */
      { 8, 0, 192, 192 }, /* 8x -> yellow */
      { 64, 0, 0, 192 }, /* 64x -> red */
      { 256, 64, 32, 230 }, /* 256x -> bright red */
      { 512, 192, 0, 128 }, /* 512x -> purple */
      { 1024, 96, 32, 96 }, /* 1024x -> UV purple */
      { 4096, 32, 16, 32 }, /* 4096x -> EUV purple */
      { 65535, 0, 0, 0 } /* max -> black */
    };

  /* find upper limit entry for value */
  int i;
  for (i = 0; i < COLOR_COUNT; ++i)
    if (table[i][0] >= value)
      break;

  /* exact match? */
  if (table[i][0] == value)
    {
      color[0] = (byte)table[i][1];
      color[1] = (byte)table[i][2];
      color[2] = (byte)table[i][3];
    }
  else
    {
      /* interpolate between the surrounding table entries.
       * (i > 0 here: table[0] covers value == 0 exactly.) */
      color[0] = (byte)interpolate(table[i-1][1], table[i-1][0],
                                   table[i][1], table[i][0],
                                   value);
      color[1] = (byte)interpolate(table[i-1][2], table[i-1][0],
                                   table[i][2], table[i][0],
                                   value);
      color[2] = (byte)interpolate(table[i-1][3], table[i-1][0],
                                   table[i][3], table[i][0],
                                   value);
    }
}
+
/* Writes a BMP image header to FILE for a 24-bit color picture of the
 * given XSIZE and YSIZE dimension.
 */
static void
write_bitmap_header(apr_file_t *file, int xsize, int ysize)
{
  /* BMP file header (some values need to filled in later)*/
  byte header[54] =
    {
      'B', 'M',        /* magic */
      0, 0, 0, 0,      /* file size (to be written later) */
      0, 0, 0, 0,      /* reserved, unused */
      54, 0, 0, 0,     /* pixel map starts at offset 54dec */

      40, 0, 0, 0,     /* DIB header has 40 bytes */
      0, 0, 0, 0,      /* x size in pixel */
      0, 0, 0, 0,      /* y size in pixel */
      1, 0,            /* 1 color plane */
      24, 0,           /* 24 bits / pixel */
      0, 0, 0, 0,      /* no pixel compression used */
      0, 0, 0, 0,      /* size of pixel array (to be written later) */
      0xe8, 3, 0, 0,   /* 1 pixel / mm */
      0xe8, 3, 0, 0,   /* 1 pixel / mm */
      0, 0, 0, 0,      /* no colors in palette */
      0, 0, 0, 0       /* no colors to import */
    };

  apr_size_t written;

  /* rows in BMP files must be aligned to 4 bytes */
  int row_size = APR_ALIGN(xsize * 3, 4);

  /* write numbers to header (write_number stores them little-endian
   * into the pre-zeroed slots) */
  write_number(header + 2, ysize * row_size + 54);
  write_number(header + 18, xsize);
  write_number(header + 22, ysize);
  write_number(header + 38, ysize * row_size);

  /* write header to file */
  written = sizeof(header);
  apr_file_write(file, header, &written);
}
+
/* To COLOR, add the fractional value of SOURCE from fractional indexes
 * SOURCE_START to SOURCE_END and apply the SCALING_FACTOR.
 */
static void
add_sample(color_t color,
           color_t *source,
           double source_start,
           double source_end,
           double scaling_factor)
{
  /* weight of this sample within one output pixel */
  double factor = (source_end - source_start) / scaling_factor;

  apr_size_t i;
  for (i = 0; i < sizeof(color_t) / sizeof(*color); ++i)
    /* NOTE(review): for very narrow spans (< 0.5 pixel, past index 1)
     * this samples the *previous* source pixel -- presumably to smooth
     * rounding at pixel boundaries; confirm intent before changing. */
    color[i] += (source_end - source_start < 0.5) && source_start > 1.0
              ? factor * source[(apr_size_t)source_start - 1][i]
              : factor * source[(apr_size_t)source_start][i];
}
+
/* Scale the IN_LEN RGB values from IN to OUT_LEN RGB values in OUT.
 * Each output pixel becomes the weighted average of the source pixels
 * it covers.
 */
static void
scale_line(color_t* out,
           int out_len,
           color_t *in,
           int in_len)
{
  /* number of source pixels that map onto one destination pixel */
  double scaling_factor = (double)(in_len) / (double)(out_len);

  apr_size_t i;
  memset(out, 0, out_len * sizeof(color_t));
  for (i = 0; i < out_len; ++i)
    {
      color_t color = { 0 };

      /* fractional source range covered by destination pixel I */
      double source_start = i * scaling_factor;
      double source_end = (i + 1) * scaling_factor;

      if ((apr_size_t)source_start == (apr_size_t)source_end)
        {
          /* the whole range lies within a single source pixel */
          add_sample(color, in, source_start, source_end, scaling_factor);
        }
      else
        {
          /* partial first pixel, whole middle pixels, partial last pixel */
          apr_size_t k;
          apr_size_t first_sample_end = (apr_size_t)source_start + 1;
          apr_size_t last_sample_start = (apr_size_t)source_end;

          add_sample(color, in, source_start, first_sample_end, scaling_factor);
          for (k = first_sample_end; k < last_sample_start; ++k)
            add_sample(color, in, k, k + 1, scaling_factor);

          add_sample(color, in, last_sample_start, source_end, scaling_factor);
        }

      memcpy(out[i], color, sizeof(color));
    }
}
+
/* Write the cluster read map for all files in INFO as BMP image to FILE.
 * If MAX_X is not 0, scale all lines to MAX_X pixels. Use POOL for
 * allocations.
 */
static void
write_bitmap(apr_array_header_t *info,
             int max_x,
             apr_file_t *file,
             apr_pool_t *pool)
{
  int ysize = info->nelts;
  int xsize = 0;
  int x, y;
  apr_size_t row_size;
  apr_size_t written;
  color_t *line, *scaled_line;
  svn_boolean_t do_scale = max_x > 0;

  /* xsize = max cluster number */
  for (y = 0; y < ysize; ++y)
    if (xsize < APR_ARRAY_IDX(info, y, file_stats_t *)->read_map->nelts)
      xsize = APR_ARRAY_IDX(info, y, file_stats_t *)->read_map->nelts;

  /* limit picture dimensions (16k pixels in each direction) */
  if (xsize >= 0x4000)
    xsize = 0x3fff;
  if (ysize >= 0x4000)
    ysize = 0x3fff;
  if (max_x == 0)
    max_x = xsize;

  /* rows in BMP files must be aligned to 4 bytes */
  row_size = APR_ALIGN(max_x * sizeof(color_t), 4);

  /* one unscaled input row plus one 4-byte-aligned output row */
  line = apr_pcalloc(pool, xsize * sizeof(color_t));
  scaled_line = apr_pcalloc(pool, row_size);

  /* write header to file */
  write_bitmap_header(file, max_x, ysize);

  /* write all rows */
  for (y = 0; y < ysize; ++y)
    {
      file_stats_t *file_info = APR_ARRAY_IDX(info, y, file_stats_t *);
      int block_count = file_info->read_map->nelts;
      for (x = 0; x < xsize; ++x)
        {
          /* grey for clusters past this file's end, heat color otherwise */
          color_t color = { 128, 128, 128 };
          if (x < block_count)
            {
              word count = APR_ARRAY_IDX(file_info->read_map, x, word);
              select_color(color, count);
            }

          memcpy(line[x], color, sizeof(color));
        }

      scale_line(scaled_line, max_x, line, block_count ? block_count : 1);

      /* NOTE(review): in the unscaled case row_size may exceed the
       * xsize * 3 bytes held by 'line' by up to 3 alignment bytes --
       * confirm the over-read/padding is acceptable. */
      written = row_size;
      apr_file_write(file, do_scale ? scaled_line : line, &written);
    }
}
+
+/* write a color bar with (roughly) logarithmic scale as BMP image to FILE.
+ */
+static void
+write_scale(apr_file_t *file)
+{
+ int x;
+ word value = 0, inc = 1;
+
+ /* write header to file */
+ write_bitmap_header(file, 64, 1);
+
+ for (x = 0; x < 64; ++x)
+ {
+ apr_size_t written;
+ byte color[3] = { 128, 128, 128 };
+
+ select_color(color, value);
+ if (value + (int)inc < 0x10000)
+ {
+ value += inc;
+ if (value >= 8 * inc)
+ inc *= 2;
+ }
+
+ written = sizeof(color);
+ apr_file_write(file, color, &written);
+ }
+}
+
/* Write a summary of the I/O ops to stdout.
 * Use POOL for temporaries.
 */
static void
print_stats(apr_pool_t *pool)
{
  /* grand totals over all tracked files */
  apr_int64_t open_count = 0;
  apr_int64_t seek_count = 0;
  apr_int64_t read_count = 0;
  apr_int64_t read_size = 0;
  apr_int64_t clusters_read = 0;
  apr_int64_t unique_clusters_read = 0;
  apr_int64_t uncached_seek_count = 0;
  apr_int64_t unnecessary_seek_count = 0;
  apr_int64_t empty_read_count = 0;

  /* aggregate the per-file counters */
  apr_hash_index_t *hi;
  for (hi = apr_hash_first(pool, files); hi; hi = apr_hash_next(hi))
    {
      const char *name = NULL;
      apr_ssize_t len = 0;
      file_stats_t *file = NULL;

      apr_hash_this(hi, (const void **)&name, &len, (void**)&file);

      open_count += file->open_count;
      seek_count += file->seek_count;
      read_count += file->read_count;
      read_size += file->read_size;
      clusters_read += file->clusters_read;
      unique_clusters_read += file->unique_clusters_read;
      uncached_seek_count += file->uncached_seek_count;
      unnecessary_seek_count += file->unnecessary_seeks;
      empty_read_count += file->empty_reads;
    }

  /* right-aligned, comma-separated numbers for readability */
  printf("%20s files\n", svn__i64toa_sep(apr_hash_count(files), ',', pool));
  printf("%20s files opened\n", svn__i64toa_sep(open_count, ',', pool));
  printf("%20s seeks\n", svn__i64toa_sep(seek_count, ',', pool));
  printf("%20s unnecessary seeks\n", svn__i64toa_sep(unnecessary_seek_count, ',', pool));
  printf("%20s uncached seeks\n", svn__i64toa_sep(uncached_seek_count, ',', pool));
  printf("%20s reads\n", svn__i64toa_sep(read_count, ',', pool));
  printf("%20s empty reads\n", svn__i64toa_sep(empty_read_count, ',', pool));
  printf("%20s unique clusters read\n", svn__i64toa_sep(unique_clusters_read, ',', pool));
  printf("%20s clusters read\n", svn__i64toa_sep(clusters_read, ',', pool));
  printf("%20s bytes read\n", svn__i64toa_sep(read_size, ',', pool));
}
+
/* Some help output.
 * (Fixes user-facing typos in the original text: "read-ish" -> "red-ish",
 * the garbled "shares of / for up to 100" -> "shades of grey ...", and a
 * missing "in" before "the current folder".)
 */
static void
print_usage(void)
{
  printf("fsfs-access-map <file>\n\n");
  printf("Reads strace of some FSFS-based tool from <file>, prints some stats\n");
  printf("and writes a cluster access map to 'access.bmp' in the current folder.\n");
  printf("Each pixel corresponds to one 64kB cluster and every line to a rev\n");
  printf("or packed rev file in the repository. Turquoise and green indicate\n");
  printf("1 and 2 hits, yellow to red-ish colors for up to 20, shades of\n");
  printf("grey for up to 100 and black for > 200 hits.\n\n");
  printf("A typical strace invocation looks like this:\n");
  printf("strace -e trace=open,close,read,lseek -o strace.txt svn log ...\n");
}
+
/* linear control flow: parse the strace log named on the command line,
 * print summary stats and write access.bmp, access_scaled.bmp and
 * scale.bmp to the current directory.
 */
int main(int argc, const char *argv[])
{
  apr_pool_t *pool = NULL;
  apr_file_t *file = NULL;

  apr_initialize();
  atexit(apr_terminate);

  pool = svn_pool_create(NULL);
  files = apr_hash_make(pool);
  handles = apr_hash_make(pool);

  /* exactly one argument expected: the strace output file */
  if (argc == 2)
    apr_file_open(&file, argv[1], APR_READ | APR_BUFFERED, APR_OS_DEFAULT,
                  pool);
  if (file == NULL)
    {
      print_usage();
      return 0;
    }
  parse_file(file);
  apr_file_close(file);

  print_stats(pool);

  /* full-resolution map: one pixel per cluster */
  apr_file_open(&file, "access.bmp",
                APR_WRITE | APR_CREATE | APR_TRUNCATE | APR_BUFFERED,
                APR_OS_DEFAULT, pool);
  write_bitmap(get_rev_files(pool), 0, file, pool);
  apr_file_close(file);

  /* same map with every row scaled to 1024 pixels */
  apr_file_open(&file, "access_scaled.bmp",
                APR_WRITE | APR_CREATE | APR_TRUNCATE | APR_BUFFERED,
                APR_OS_DEFAULT, pool);
  write_bitmap(get_rev_files(pool), 1024, file, pool);
  apr_file_close(file);

  /* color legend for the maps above */
  apr_file_open(&file, "scale.bmp",
                APR_WRITE | APR_CREATE | APR_TRUNCATE | APR_BUFFERED,
                APR_OS_DEFAULT, pool);
  write_scale(file);
  apr_file_close(file);

  return 0;
}
diff --git a/tools/dev/gdb-py/README b/tools/dev/gdb-py/README
new file mode 100644
index 0000000..38133f1
--- /dev/null
+++ b/tools/dev/gdb-py/README
@@ -0,0 +1,29 @@
+This directory includes a Python module which will integrate with gdb which
+can be used to pretty-print various Subversion types. For additional
+information about gdb pretty-printing, see:
+
+ http://sourceware.org/gdb/onlinedocs/gdb/Pretty-Printing.html
+
+
+How to Use
+----------
+To enable pretty printing of selected Subversion types, put the following code
+in your ~/.gdbinit:
+
+[[[
+python
+import sys, os.path
+sys.path.insert(0, os.path.expanduser('~/dev/svn-trunk/tools/dev/gdb-py'))
+from svndbg.printers import register_libsvn_printers
+register_libsvn_printers(None)
+end
+]]]
+
+Change the path to point to the correct location on your platform for the
+gdb-py directory, and then load gdb. Everything should Just Work.
+(I believe this requires gdb >= 7.0, but earlier versions may also work.)
+
+The list of currently supported types for pretty printing is a bit lacking,
+so should you run into a type which could be useful to be pretty printed,
+read the documentation referenced above and follow the existing examples
+to extend the pretty-printing support. Enjoy!
diff --git a/tools/dev/gdb-py/svndbg/__init__.py b/tools/dev/gdb-py/svndbg/__init__.py
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/tools/dev/gdb-py/svndbg/__init__.py
diff --git a/tools/dev/gdb-py/svndbg/printers.py b/tools/dev/gdb-py/svndbg/printers.py
new file mode 100644
index 0000000..f1ee085
--- /dev/null
+++ b/tools/dev/gdb-py/svndbg/printers.py
@@ -0,0 +1,417 @@
+#!/usr/bin/env python
+#
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements. See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership. The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied. See the License for the
+# specific language governing permissions and limitations
+# under the License.
+#
+#
+
+import gdb
+import re
+
+import gdb.printing
+from gdb.printing import RegexpCollectionPrettyPrinter
+
+
class TypedefRegexCollectionPrettyPrinter(RegexpCollectionPrettyPrinter):
    """Class for implementing a collection of pretty-printers, matching the
    type name to a regular expression.

    A pretty-printer in this collection will be used if the type of the
    value to be printed matches the printer's regular expression, or if
    the value is a pointer to and/or typedef to a type name that matches
    its regular expression.  The variations are tried in this order:

      1. the type name as known to the debugger (could be a 'typedef');
      2. the type after stripping off any number of layers of 'typedef';
      3. if it is a pointer, the pointed-to type;
      4. if it is a pointer, the pointed-to type minus some 'typedef's.

    In all cases, ignore 'const' and 'volatile' qualifiers.  When
    matching the pointed-to type, dereference the value or use 'None' if
    the value was a null pointer.

    This class is modeled on RegexpCollectionPrettyPrinter, which (in GDB
    7.3) matches on the base type's tag name and can't match a pointer
    type or any other type that doesn't have a tag name.
    """

    def __init__(self, name):
        super(TypedefRegexCollectionPrettyPrinter, self).__init__(name)

    def __call__(self, val):
        """Find and return an instantiation of a printer for VAL, or None
        if no printer in this collection matches VAL's type.
        """

        def lookup_type(typ, val):
            """Return the first printer whose regular expression matches the
            name (tag name for struct/union/enum types) of TYP, ignoring
            any 'const' or 'volatile' qualifiers.

            VAL is a gdb.Value, or may be None to indicate a dereferenced
            null pointer.  TYP is the associated gdb.Type.
            """
            # (Renamed from 'type' to avoid shadowing the builtin.)
            if typ.code in [gdb.TYPE_CODE_STRUCT, gdb.TYPE_CODE_UNION,
                            gdb.TYPE_CODE_ENUM]:
                typename = typ.tag
            else:
                typename = str(typ.unqualified())
            for printer in self.subprinters:
                if printer.enabled and printer.compiled_re.search(typename):
                    return printer.gen_printer(val)

        def lookup_type_or_alias(typ, val):
            """Return the first printer matching TYP, or else if TYP is a
            typedef then the first printer matching the aliased type.

            VAL is a gdb.Value, or may be None to indicate a dereferenced
            null pointer.  TYP is the associated gdb.Type.
            """
            # First, look for a printer for the given (but unqualified) type.
            printer = lookup_type(typ, val)
            if printer:
                return printer

            # If it's a typedef, look for a printer for the aliased type,
            # peeling off one typedef layer at a time.
            while typ.code == gdb.TYPE_CODE_TYPEDEF:
                typ = typ.target()
                printer = lookup_type(typ, val)
                if printer:
                    return printer

        # First, look for a printer for the given (but unqualified) type, or
        # its aliased type if it's a typedef.
        printer = lookup_type_or_alias(val.type, val)
        if printer:
            return printer

        # If it's a pointer, look for a printer for the pointed-to type.
        if val.type.code == gdb.TYPE_CODE_PTR:
            # Fixed idiom: was 'val and val.dereference() or None', the
            # fragile 'cond and a or b' pattern which would yield None if
            # the dereferenced value happened to be falsy.
            target_val = val.dereference() if val else None
            printer = lookup_type_or_alias(val.type.target(), target_val)
            if printer:
                return printer

        # Cannot find a matching pretty printer in this collection.
        return None
+
class InferiorFunction:
    """Callable proxy for a function living in the inferior process.

    The symbol lookup is deferred until the first call and cached on the
    instance thereafter.
    """

    def __init__(self, function_name):
        self.function_name = function_name
        self.func = None  # resolved lazily by __call__

    def __call__(self, *args):
        if not self.func:
            # Resolve the symbol in the inferior on first use.
            self.func = gdb.parse_and_eval(self.function_name)
        return self.func(*args)
+
def children_as_map(children_iterator):
    """Flatten (key, value) pairs into the alternating 'key'/'val' stream
    that a pretty-printer 'children' method must yield when the
    display-hint is 'map'.
    """
    for key, value in children_iterator:
        yield 'key', key
        yield 'val', value
+
+
+########################################################################
+
+# Pretty-printing for APR library types.
+
# Some useful gdb.Type instances that can be initialized before any object
# files are loaded.
pvoidType = gdb.lookup_type('void').pointer()    # generic 'void *'
cstringType = gdb.lookup_type('char').pointer()  # C string: 'char *'

# Some functions that resolve to calls into the inferior process.
# Each symbol is looked up lazily on first call (see InferiorFunction).
apr_hash_count = InferiorFunction('apr_hash_count')
apr_hash_first = InferiorFunction('apr_hash_first')
apr_hash_next = InferiorFunction('apr_hash_next')
apr_hash_this_key = InferiorFunction('apr_hash_this_key')
apr_hash_this_val = InferiorFunction('apr_hash_this_val')
+
def children_of_apr_hash(hash_p, value_type=None):
    """Iterate over an 'apr_hash_t *' GDB value, in the way required for a
    pretty-printer 'children' method when the display-hint is 'map'.
    Cast the value pointers to VALUE_TYPE, or return values as '...' if
    VALUE_TYPE is None.

    Keys are assumed to be C strings ('char *').
    """
    hi = apr_hash_first(0, hash_p)
    while (hi):
        k = apr_hash_this_key(hi).reinterpret_cast(cstringType)
        if value_type:
            val = apr_hash_this_val(hi).reinterpret_cast(value_type)
        else:
            val = '...'
        # Reading the key string can fail on unreadable inferior memory;
        # degrade gracefully instead of aborting the whole dump.  Fixed a
        # bare 'except:' so KeyboardInterrupt and friends still propagate.
        try:
            key = k.string()
        except Exception:
            key = '<unreadable>'
        yield key, val
        hi = apr_hash_next(hi)
+
class AprHashPrinter:
    """Pretty-printer for 'apr_hash_t' with 'char *' keys and unknown
    value types."""

    def __init__(self, val):
        # Keep a NULL/None value as-is; otherwise operate on its address.
        self.hash_p = val.address if val else val

    def to_string(self):
        """Summary line shown before the children, or 'NULL'."""
        if not self.hash_p:
            return 'NULL'
        return 'hash of %s items' % (apr_hash_count(self.hash_p),)

    def children(self):
        if not self.hash_p:
            return []
        return children_as_map(children_of_apr_hash(self.hash_p))

    def display_hint(self):
        return 'map'
+
def children_of_apr_array(array, value_type):
    """Iterate over an 'apr_array_header_t' GDB value, yielding
    (index-string, element) pairs as required for a pretty-printer
    'children' method when the display-hint is 'array'.  Elements are
    cast to VALUE_TYPE.
    """
    count = int(array['nelts'])
    data = array['elts'].reinterpret_cast(value_type.pointer())
    for index in range(count):
        yield str(index), data[index]
+
class AprArrayPrinter:
    """Pretty-printer for 'apr_array_header_t' with unknown element type."""

    def __init__(self, val):
        self.array = val

    def to_string(self):
        if not self.array:
            return 'NULL'
        return 'array of %d items' % (int(self.array['nelts']),)

    def children(self):
        # The element type is unknown, so no children can be rendered.
        return []

    def display_hint(self):
        return 'array'
+
+########################################################################
+
+# Pretty-printing for Subversion libsvn_subr types.
+
class SvnBooleanPrinter:
    """Pretty-printer for svn_boolean_t."""

    def __init__(self, val):
        self.val = val

    def to_string(self):
        if self.val is None:
            return '(NULL)'
        return 'TRUE' if self.val else 'FALSE'
+
class SvnStringPrinter:
    """Pretty-printer for svn_string_t (a counted string)."""

    def __init__(self, val):
        self.val = val

    def to_string(self):
        if not self.val:
            return 'NULL'
        # Honor the explicit length field; the data need not be
        # NUL-terminated.  (Local renamed from 'len' to avoid shadowing
        # the builtin.)
        length = int(self.val['len'])
        return self.val['data'].string(length=length)

    def display_hint(self):
        if self.val:
            return 'string'
+
class SvnMergeRangePrinter:
    """Pretty-printer for svn_merge_range_t, rendered in the familiar
    mergeinfo notation: '5', '3-7', reverse ranges with a leading '-',
    and a '*' suffix for non-inheritable ranges."""

    def __init__(self, val):
        self.val = val

    def to_string(self):
        if not self.val:
            return 'NULL'

        start = int(self.val['start'])
        end = int(self.val['end'])
        if 0 <= start < end:
            # Forward range: displayed revisions run start+1 .. end.
            if start + 1 == end:
                text = str(end)
            else:
                text = '%d-%d' % (start + 1, end)
        elif 0 <= end < start:
            # Reverse (undo) range.
            if start == end + 1:
                text = '-%d' % (start,)
            else:
                text = '%d-%d' % (start, end + 1)
        else:
            text = '(INVALID: s=%d, e=%d)' % (start, end)
        if not self.val['inheritable']:
            text += '*'
        return text

    def display_hint(self):
        if self.val:
            return 'string'
+
class SvnRangelistPrinter:
    """Pretty-printer for svn_rangelist_t: its svn_merge_range_t elements
    joined with commas."""

    def __init__(self, val):
        self.array = val
        self.svn_merge_range_t = gdb.lookup_type('svn_merge_range_t')

    def to_string(self):
        if not self.array:
            return 'NULL'

        range_ptr = self.svn_merge_range_t.pointer()
        parts = [SvnMergeRangePrinter(element).to_string()
                 for _, element in children_of_apr_array(self.array,
                                                         range_ptr)]
        return ','.join(parts)

    def display_hint(self):
        if self.array:
            return 'string'
+
class SvnMergeinfoPrinter:
    """Pretty-printer for svn_mergeinfo_t: a hash mapping paths to
    rangelists, rendered as '{ path:ranges; ... }'."""

    def __init__(self, val):
        self.hash_p = val
        self.svn_rangelist_t = gdb.lookup_type('svn_rangelist_t')

    def to_string(self):
        if self.hash_p == 0:
            return 'NULL'

        rangelist_ptr = self.svn_rangelist_t.pointer()
        parts = ['%s:%s' % (key, SvnRangelistPrinter(value).to_string())
                 for key, value in children_of_apr_hash(self.hash_p,
                                                        rangelist_ptr)]
        return '{ ' + '; '.join(parts) + ' }'
+
class SvnMergeinfoCatalogPrinter:
    """Pretty-printer for svn_mergeinfo_catalog_t: a hash mapping paths
    to svn_mergeinfo_t values."""

    def __init__(self, val):
        self.hash_p = val
        self.svn_mergeinfo_t = gdb.lookup_type('svn_mergeinfo_t')

    def to_string(self):
        if self.hash_p == 0:
            return 'NULL'

        parts = ["'%s': %s" % (key, SvnMergeinfoPrinter(value).to_string())
                 for key, value in children_of_apr_hash(self.hash_p,
                                                        self.svn_mergeinfo_t)]
        return '{ ' + ',\n  '.join(parts) + ' }'
+
+########################################################################
+
+# Pretty-printing for Subversion libsvn_client types.
+
class SvnPathrevPrinter:
    """Pretty-printer for svn_client__pathrev_t, shown as 'RELPATH@REV'."""

    def __init__(self, val):
        self.val = val

    def to_string(self):
        if not self.val:
            return 'NULL'

        rev = int(self.val['rev'])
        url = self.val['url'].string()
        root = self.val['repos_root_url'].string()
        # Strip the repository root to leave the in-repository path.
        return "%s@%d" % (url[len(root):], rev)

    def display_hint(self):
        if self.val:
            return 'string'
+
+
+########################################################################
+
# Module-level singletons holding the two printer collections; populated
# by build_libsvn_printers() when this module is loaded.
libapr_printer = None
libsvn_printer = None
+
def build_libsvn_printers():
    """Construct the pretty-printer collections (libapr and libsvn)."""

    global libapr_printer, libsvn_printer

    # (display name, type-name regexp, printer class) per supported type.
    apr_table = [
        ('apr_hash_t', r'^apr_hash_t$', AprHashPrinter),
        ('apr_array_header_t', r'^apr_array_header_t$', AprArrayPrinter),
    ]
    svn_table = [
        ('svn_boolean_t', r'^svn_boolean_t$', SvnBooleanPrinter),
        ('svn_string_t', r'^svn_string_t$', SvnStringPrinter),
        ('svn_client__pathrev_t', r'^svn_client__pathrev_t$',
         SvnPathrevPrinter),
        ('svn_merge_range_t', r'^svn_merge_range_t$', SvnMergeRangePrinter),
        ('svn_rangelist_t', r'^svn_rangelist_t$', SvnRangelistPrinter),
        ('svn_mergeinfo_t', r'^svn_mergeinfo_t$', SvnMergeinfoPrinter),
        ('svn_mergeinfo_catalog_t', r'^svn_mergeinfo_catalog_t$',
         SvnMergeinfoCatalogPrinter),
    ]

    libapr_printer = TypedefRegexCollectionPrettyPrinter("libapr")
    for display_name, regexp, printer_class in apr_table:
        libapr_printer.add_printer(display_name, regexp, printer_class)

    libsvn_printer = TypedefRegexCollectionPrettyPrinter("libsvn")
    for display_name, regexp, printer_class in svn_table:
        libsvn_printer.add_printer(display_name, regexp, printer_class)
+
+
def register_libsvn_printers(obj):
    """Register the pretty-printers for the object file OBJ.

    OBJ may be None to register globally with gdb, as the README's
    .gdbinit snippet does.
    """

    global libapr_printer, libsvn_printer

    # Printers registered later take precedence.
    gdb.printing.register_pretty_printer(obj, libapr_printer)
    gdb.printing.register_pretty_printer(obj, libsvn_printer)
+
+
# Construct the pretty-printer objects, once, at GDB start-up time when this
# Python module is loaded.  (Registration happens later, once per object
# file, via register_libsvn_printers.)
build_libsvn_printers()
diff --git a/tools/dev/gen-javahl-errors.py b/tools/dev/gen-javahl-errors.py
new file mode 100755
index 0000000..c949d4a
--- /dev/null
+++ b/tools/dev/gen-javahl-errors.py
@@ -0,0 +1,86 @@
+#!/usr/bin/env python
+#
+# gen-javahl-errors.py: Generate a Java class containing an enum for the
+# C error codes
+#
+# ====================================================================
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements. See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership. The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied. See the License for the
+# specific language governing permissions and limitations
+# under the License.
+# ====================================================================
+#
+
+import sys, os
+
# Import the Subversion Python bindings; the error codes are exposed there
# as SVN_ERR_* attributes of the 'core' module.
try:
    from svn import core
except ImportError as e:
    sys.stderr.write("ERROR: Unable to import Subversion's Python bindings: '%s'\n" \
                     "Hint: Set your PYTHONPATH environment variable, or adjust your " \
                     "PYTHONSTARTUP\nfile to point to your Subversion install " \
                     "location's svn-python directory.\n" % e)
    sys.stderr.flush()
    sys.exit(1)
+
def get_errors():
    """Return a dict mapping numeric SVN error codes to their SVN_ERR_*
    names, harvested from the bindings' 'core' module."""
    errs = {}
    for key, value in vars(core).items():
        if key.startswith('SVN_ERR_'):
            # Not every SVN_ERR_* attribute is numeric; skip the rest.
            # (Narrowed from a bare 'except:'.)
            try:
                errs[int(value)] = key
            except (TypeError, ValueError):
                pass
    return errs
+
def gen_javahl_class(error_codes, output_filename):
    """Write the Java 'ErrorCodes' class to OUTPUT_FILENAME.

    ERROR_CODES maps numeric codes to SVN_ERR_* names; one Java constant
    is emitted per code, in ascending numeric order.
    """
    # 'with' guarantees the file is closed even if a write fails
    # (the original leaked the handle on error).
    with open(output_filename, 'w') as jfile:
        jfile.write(
"""/** ErrorCodes.java - This file is autogenerated by gen-javahl-errors.py
 */

package org.tigris.subversion.javahl;

/**
 * Provide mappings from error codes generated by the C runtime to meaningful
 * Java values. For a better description of each error, please see
 * svn_error_codes.h in the C source.
 */
public class ErrorCodes
{
""")

        for key in sorted(error_codes.keys()):
            # Format the code name to be more Java-esque:
            # SVN_ERR_BAD_MIME_TYPE -> badMimeType
            code_name = error_codes[key][8:].replace('_', ' ').title().replace(' ', '')
            code_name = code_name[0].lower() + code_name[1:]

            jfile.write("    public static final int %s = %d;\n" % (code_name, key))

        jfile.write("}\n")
+
if __name__ == "__main__":
    # The output path may be given as the sole argument; otherwise default
    # to the in-tree JavaHL source location relative to tools/dev.
    if len(sys.argv) > 1:
        output_filename = sys.argv[1]
    else:
        output_filename = os.path.join('..', '..', 'subversion', 'bindings',
                                       'javahl', 'src', 'org', 'tigris',
                                       'subversion', 'javahl', 'ErrorCodes.java')

    gen_javahl_class(get_errors(), output_filename)
diff --git a/tools/dev/gen-py-errors.py b/tools/dev/gen-py-errors.py
new file mode 100755
index 0000000..266e04b
--- /dev/null
+++ b/tools/dev/gen-py-errors.py
@@ -0,0 +1,109 @@
+#!/usr/bin/env python
+#
+# gen-py-errors.py: Generate a python module which maps error names to numbers.
+# (The purpose being easier writing of the python tests.)
+#
+# ====================================================================
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements. See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership. The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied. See the License for the
+# specific language governing permissions and limitations
+# under the License.
+# ====================================================================
+#
+#
+# Locates svn_error_codes.h based on its relative location to this script.
+#
+# Generates to STDOUT. Typically, redirect this into svntest/err.py
+#
+
+import sys
+import os
+import re
+
HEADER = '''#!/usr/bin/env python
### This file automatically generated by tools/dev/gen-py-errors.py,
### which see for more information
###
### It is versioned for convenience.
'''

# Established by svn 1.0. May as well hard-code these.
APR_OS_START_ERROR = 20000
APR_OS_START_USERERR = APR_OS_START_ERROR + 50000 * 2
SVN_ERR_CATEGORY_SIZE = 5000

# Category markers: a SVN_ERR_*_CATEGORY_START name, with its numeric
# multiplier on the following line.
RE_CAT_NAME = re.compile(r'SVN_ERR_([A-Z_]+)_CATEG')
RE_CAT_VALUE = re.compile(r'\d+')

# Error definitions: SVN_ERRDEF(<NAME>, with the category + offset
# expression on the following line.
RE_DEF_NAME = re.compile(r'SVN_ERRDEF\(([A-Z0-9_]+)')
RE_DEF_VALUE = re.compile(r'SVN_ERR_([A-Z_]+)_CATEG[^0-9]*([0-9]+)')


def write_output(codes):
    """Print the generated module: HEADER plus one 'NAME = value' line
    per (name, value) pair in CODES."""
    print(HEADER)

    for name, value in codes:
        # skip SVN_ERR_ on the name
        print('%s = %d' % (name[8:], value))


def main(codes_fname):
    """Parse svn_error_codes.h at CODES_FNAME and print the Python
    error-code module to stdout."""
    categ = { }
    codes = [ ]

    # BUG FIX: the original used the Python 2-only file method f.next();
    # use the builtin next() instead, and a 'with' block so the file is
    # always closed.
    with open(codes_fname) as f:
        # Parse all the category start values
        while True:
            line = next(f)
            m = RE_CAT_NAME.search(line)
            if m:
                name = m.group(1)
                m = RE_CAT_VALUE.search(next(f))
                assert m
                value = int(m.group(0))
                categ[name] = APR_OS_START_USERERR + value * SVN_ERR_CATEGORY_SIZE

            elif line.strip() == 'SVN_ERROR_START':
                break

        # Parse each of the error values
        while True:
            line = next(f)
            m = RE_DEF_NAME.search(line)
            if m:
                name = m.group(1)
                line = next(f)
                m = RE_DEF_VALUE.search(line)
                if not m:
                    # SVN_ERR_WC_NOT_DIRECTORY is defined as equal to
                    # NOT_WORKING_COPY rather than relative to
                    # SVN_ERR_WC_CATEGORY_START
                    continue
                cat = m.group(1)
                value = int(m.group(2))
                codes.append((name, categ[cat] + value))

            elif line.strip() == 'SVN_ERROR_END':
                break

    write_output(sorted(codes))
+
+
if __name__ == '__main__':
    # Locate svn_error_codes.h relative to this script: tools/dev/../..
    # is the tree root.
    this_dir = os.path.dirname(os.path.abspath(__file__))
    codes_fname = os.path.join(this_dir, os.path.pardir, os.path.pardir,
                               'subversion', 'include', 'svn_error_codes.h')
    main(codes_fname)
diff --git a/tools/dev/gen_junit_report.py b/tools/dev/gen_junit_report.py
new file mode 100755
index 0000000..8ce158c
--- /dev/null
+++ b/tools/dev/gen_junit_report.py
@@ -0,0 +1,301 @@
+#!/bin/env python
+#
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements. See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership. The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied. See the License for the
+# specific language governing permissions and limitations
+# under the License.
+#
+#
+
+# $Id: gen_junit_report.py 1741723 2016-04-30 08:16:53Z stefan2 $
+"""
+gen_junit_report.py -- The script is to generate the junit report for
+Subversion tests. The script uses the log file, tests.log created by
+"make check" process. It parses the log file and generate the junit
+files for each test separately in the specified output directory. The
+script can take --log-file and --output-dir arguments.
+"""
+
+import sys
+import os
+import getopt
+
def replace_from_map(data, encode):
    """Return DATA with every key of ENCODE replaced by its mapped value,
    applied in the mapping's iteration order."""
    result = data
    for pattern, replacement in encode.items():
        result = result.replace(pattern, replacement)
    return result

# The five XML metacharacters.  '&' is listed (hence replaced) first, so
# the '&' introduced by the other entities is not escaped again.
xml_encode_map = {
    '&': '&amp;',
    '<': '&lt;',
    '>': '&gt;',
    '"': '&quot;',
    "'": '&apos;',
    }

def xml_encode(data):
    """Escape the XML special characters in DATA."""
    return replace_from_map(data, xml_encode_map)
+
# Control characters (plus the CDATA terminator) that would make the
# generated XML invalid, mapped to printable Unicode "symbol for ..."
# references.
special_encode_map = {
    ']]>': ']]]]><![CDATA[>', # CDATA terminator sequence
    '\000': '&#9216;', # U+2400 SYMBOL FOR NULL
    '\001': '&#9217;', # U+2401 SYMBOL FOR START OF HEADING
    '\002': '&#9218;', # U+2402 SYMBOL FOR START OF TEXT
    '\003': '&#9219;', # U+2403 SYMBOL FOR END OF TEXT
    '\004': '&#9220;', # U+2404 SYMBOL FOR END OF TRANSMISSION
    '\005': '&#9221;', # U+2405 SYMBOL FOR ENQUIRY
    '\006': '&#9222;', # U+2406 SYMBOL FOR ACKNOWLEDGE
    '\007': '&#9223;', # U+2407 SYMBOL FOR BELL
    '\010': '&#9224;', # U+2408 SYMBOL FOR BACKSPACE
    '\011': '&#9225;', # U+2409 SYMBOL FOR HORIZONTAL TABULATION
    #'\012': '&#9226;', # U+240A SYMBOL FOR LINE FEED
    '\013': '&#9227;', # U+240B SYMBOL FOR VERTICAL TABULATION
    '\014': '&#9228;', # U+240C SYMBOL FOR FORM FEED
    #'\015': '&#9229;', # U+240D SYMBOL FOR CARRIAGE RETURN
    '\016': '&#9230;', # U+240E SYMBOL FOR SHIFT OUT
    '\017': '&#9231;', # U+240F SYMBOL FOR SHIFT IN
    '\020': '&#9232;', # U+2410 SYMBOL FOR DATA LINK ESCAPE
    '\021': '&#9233;', # U+2411 SYMBOL FOR DEVICE CONTROL ONE
    '\022': '&#9234;', # U+2412 SYMBOL FOR DEVICE CONTROL TWO
    '\023': '&#9235;', # U+2413 SYMBOL FOR DEVICE CONTROL THREE
    '\024': '&#9236;', # U+2414 SYMBOL FOR DEVICE CONTROL FOUR
    '\025': '&#9237;', # U+2415 SYMBOL FOR NEGATIVE ACKNOWLEDGE
    '\026': '&#9238;', # U+2416 SYMBOL FOR SYNCHRONOUS IDLE
    '\027': '&#9239;', # U+2417 SYMBOL FOR END OF TRAMSNISSION BLOCK
    '\030': '&#9240;', # U+2418 SYMBOL FOR CANCEL
    '\031': '&#9241;', # U+2419 SYMBOL FOR END OF MEDIUM
    '\032': '&#9242;', # U+241A SYMBOL FOR SUBSTITUTE
    '\033': '&#9243;', # U+241B SYMBOL FOR ESCAPE
    '\034': '&#9244;', # U+241C SYMBOL FOR FILE SEPARATOR
    '\035': '&#9245;', # U+241D SYMBOL FOR GROUP SEPARATOR
    '\036': '&#9246;', # U+241E SYMBOL FOR RECORD SEPARATOR
    '\037': '&#9247;', # U+241F SYMBOL FOR UNIT SEPARATOR
    }

def escape_special_characters(data):
    """Neutralize control characters in DATA (a test failure reason);
    falsy DATA (None or '') is passed through unchanged."""
    if not data:
        return data
    return replace_from_map(data, special_encode_map)
+
def start_junit():
    """Return the XML declaration that begins every junit document."""
    return """<?xml version="1.0" encoding="UTF-8"?>"""
+
def start_testsuite(test_name):
    """Return the opening <testsuite> element for TEST_NAME.

    The time/tests/failures/errors/skipped attributes are placeholder
    tokens (ELAPSED_*, TOTAL_*, FAIL_*, SKIP_*) that update_stat()
    substitutes once the suite's statistics are known.
    """
    sub_test_name = test_name.replace('.', '-')
    return """<testsuite time="ELAPSED_%s" tests="TOTAL_%s" name="%s"
    failures="FAIL_%s" errors="FAIL_%s" skipped="SKIP_%s">""" % \
        (test_name, test_name, sub_test_name, test_name, test_name, test_name)
+
def junit_testcase_ok(test_name, casename):
    """Return a <testcase> element for a PASSed case; the elapsed time is
    a placeholder later substituted by update_stat()."""
    return """<testcase time="ELAPSED_CASE_%s" name="%s" classname="%s"/>""" % \
        (test_name, xml_encode(casename), test_name.replace('.', '-'))
+
def junit_testcase_fail(test_name, casename, reason=None):
    """Return a <testcase> element for a FAILed case, carrying REASON in
    a CDATA failure block."""
    return """<testcase time="ELAPSED_CASE_%s" name="%s" classname="%s">
    <failure type="Failed"><![CDATA[%s]]></failure>
    </testcase>""" % (test_name, xml_encode(casename),
                      test_name.replace('.', '-'),
                      escape_special_characters(reason))
+
def junit_testcase_xfail(test_name, casename, reason=None):
    """Return a <testcase> element for an expected failure (XFAIL); the
    reason is attached as system-out rather than as a failure."""
    return """<testcase time="ELAPSED_CASE_%s" name="%s" classname="%s">
    <system-out><![CDATA[%s]]></system-out>
    </testcase>""" % (test_name, xml_encode(casename),
                      test_name.replace('.', '-'),
                      escape_special_characters(reason))
+
def junit_testcase_skip(test_name, casename):
    """Return a <testcase> element for a SKIPped case."""
    return """<testcase time="ELAPSED_CASE_%s" name="%s" classname="%s">
    <skipped message="Skipped"/>
    </testcase>""" % (test_name, xml_encode(casename),
                      test_name.replace('.', '-'))
+
def end_testsuite():
    """Return the closing </testsuite> tag."""
    return """</testsuite>"""
+
def update_stat(test_name, junit, count):
    """Join the JUNIT fragment list into one string and substitute the
    statistic placeholders (TOTAL_*, FAIL_*, SKIP_*, ELAPSED_*,
    ELAPSED_CASE_*) for TEST_NAME with the real numbers from COUNT."""
    stats = count[test_name]
    total = float(stats['pass'] + stats['fail'] + stats['skip'])
    elapsed = float(stats['elapsed'])
    # Average per-case time; guard the division for suites without cases.
    per_case = elapsed / total if total > 0 else 0

    # Substitution order matters only in that it mirrors the original;
    # 'ELAPSED_<name>' is not a substring of 'ELAPSED_CASE_<name>'.
    substitutions = [
        ('TOTAL_%s' % test_name, '%s' % total),
        ('FAIL_%s' % test_name, '%s' % stats['fail']),
        ('SKIP_%s' % test_name, '%s' % stats['skip']),
        ('ELAPSED_%s' % test_name, '%.3f' % elapsed),
        ('ELAPSED_CASE_%s' % test_name, '%.3f' % per_case),
    ]
    junit_str = '\n'.join(junit)
    for token, value in substitutions:
        junit_str = junit_str.replace(token, value)
    return junit_str
+
def main():
    """Parse the tests.log written by 'make check' and emit one junit xml
    file per test suite into the output directory.

    Recognized log markers: START:, END:, PASS:, SKIP:, FAIL:, XFAIL: and
    ELAPSED:.  Unmarked lines accumulate as the failure reason for the
    next FAIL/XFAIL case.  Requires --log-file and --output-dir.
    """
    try:
        opts, args = getopt.getopt(sys.argv[1:], 'l:d:h',
                                   ['log-file=', 'output-dir=', 'help'])
    except getopt.GetoptError as err:
        usage(err)

    log_file = None
    output_dir = None
    for opt, value in opts:
        if opt in ('-h', '--help'):
            usage()
        elif opt in ('-l', '--log-file'):
            log_file = value
        elif opt in ('-d', '--output-dir'):
            output_dir = value
        else:
            usage('Unable to recognize option')

    if not log_file or not output_dir:
        usage("The options --log-file and --output-dir are mandatory")

    # create junit output directory, if not exists
    if not os.path.exists(output_dir):
        print("Directory '%s' not exists, creating ..." % output_dir)
        try:
            os.makedirs(output_dir)
        except OSError as err:
            sys.stderr.write("ERROR: %s\n" % err)
            sys.exit(1)

    patterns = {
        'start' : 'START:',
        'end' : 'END:',
        'pass' : 'PASS:',
        'skip' : 'SKIP:',
        'fail' : 'FAIL:',
        'xfail' : 'XFAIL:',
        'elapsed' : 'ELAPSED:'
    }

    def case_name(line, tag):
        """Return the case name after the marker prefix.

        BUG FIX: the original used line.strip(patterns[tag]), but
        str.strip() treats its argument as a *set of characters*, so case
        names beginning or ending with any of those letters were mangled.
        Remove the exact prefix instead.
        """
        return line[len(patterns[tag]):].strip()

    junit = [start_junit()]
    reason = None
    count = {}
    try:
        fp = open(log_file, 'r')
    except IOError as err:
        sys.stderr.write("ERROR: %s\n" % err)
        sys.exit(1)

    for line in fp.readlines():
        line = line.strip()
        if line.startswith(patterns['start']):
            reason = ""
            test_name = line.split(' ')[1]
            # NOTE(review): the replace() result below is discarded, so
            # the announced '.' -> '_' renaming never takes effect; kept
            # as-is to preserve existing output file names -- confirm the
            # intended behavior before making the assignment effective.
            test_name.replace('.', '_')
            count[test_name] = {
                'pass' : 0,
                'skip' : 0,
                'fail' : 0,
                'xfail' : 0,
                'elapsed' : 0,
                'total' : 0
            }
            junit.append(start_testsuite(test_name))
        elif line.startswith(patterns['end']):
            junit.append(end_testsuite())
        elif line.startswith(patterns['pass']):
            reason = ""
            junit.append(junit_testcase_ok(test_name,
                                           case_name(line, 'pass')))
            count[test_name]['pass'] += 1
        elif line.startswith(patterns['skip']):
            reason = ""
            junit.append(junit_testcase_skip(test_name,
                                             case_name(line, 'skip')))
            count[test_name]['skip'] += 1
        elif line.startswith(patterns['fail']):
            junit.append(junit_testcase_fail(test_name,
                                             case_name(line, 'fail'),
                                             reason))
            count[test_name]['fail'] += 1
            reason = ""
        elif line.startswith(patterns['xfail']):
            # Expected failures count as passes in the summary.
            junit.append(junit_testcase_xfail(test_name,
                                              case_name(line, 'xfail'),
                                              reason))
            count[test_name]['pass'] += 1
            reason = ""
        elif line.startswith(patterns['elapsed']):
            reason = ""
            elapsed = line.split(' ')[2].strip()
            (hrs, mins, secs) = elapsed.split(':')
            # BUG FIX: hours were previously converted with a factor of
            # 24; one hour is 3600 seconds.
            secs_taken = int(hrs) * 3600 + int(mins) * 60 + float(secs)
            count[test_name]['elapsed'] = secs_taken

            # ELAPSED: closes a suite -- write its junit file and reset.
            junit_str = update_stat(test_name, junit, count)
            test_junit_file = os.path.join(output_dir,
                                           "%s.junit.xml" % test_name)
            with open(test_junit_file, 'w') as w_fp:
                w_fp.writelines(junit_str)
            junit = []
        elif len(line):
            reason = "%s\n%s" % (reason, line)
    fp.close()
+
def usage(errorMsg=None):
    """Print the usage text to stdout.

    If ERRORMSG is given, also print it to stderr and exit with status 1;
    otherwise exit with status 0.
    """
    script_name = os.path.basename(sys.argv[0])
    sys.stdout.write("""USAGE: %s: [--help|h] --log-file|l --output-dir|d

Options:
  --help|-h Display help message
  --log-file|l The log file to parse for generating junit xml files
  --output-dir|d The directory to create the junit xml file for each
                 test
""" % script_name)
    if errorMsg is not None:
        sys.stderr.write("\nERROR: %s\n" % errorMsg)
        sys.exit(1)
    sys.exit(0)
+
# Script entry point.
if __name__ == '__main__':
    main()
diff --git a/tools/dev/gnuify-changelog.pl b/tools/dev/gnuify-changelog.pl
new file mode 100755
index 0000000..a4112c7
--- /dev/null
+++ b/tools/dev/gnuify-changelog.pl
@@ -0,0 +1,164 @@
+#!/usr/bin/perl -w
+# ====================================================================
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements. See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership. The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied. See the License for the
+# specific language governing permissions and limitations
+# under the License.
+# ====================================================================
+# a script to munge the output of 'svn log' into something approaching the
+# style of a GNU ChangeLog.
+#
+# to use this, just fill in the 'hackers' hash with the usernames and
+# name/emails of the people who work on your project, go to the top level
+# of your working copy, and run:
+#
+# $ svn log | /path/to/gnuify-changelog.pl > ChangeLog
+
+require 5.0;
+use strict;
+
+my %hackers = (
+ "svn" => 'Collab.net Subversion Team',
+ "jimb" => 'Jim Blandy <jimb@redhat.com>',
+ "sussman" => 'Ben Collins-Sussman <sussman@collab.net>',
+ "kfogel" => 'Karl Fogel <kfogel@collab.net>',
+ "gstein" => 'Greg Stein <gstein@lyra.org>',
+ "brane" => 'Branko Cibej <brane@xbc.nu>',
+ "joe" => 'Joe Orton <joe@light.plus.com>',
+ "ghudson" => 'Greg Hudson <ghudson@mit.edu>',
+ "lefty" => 'Lee P. W. Burgess <lefty@red-bean.com>',
+ "fitz" => 'Brian Fitzpatrick <fitz@red-bean.com>',
+ "mab" => 'Matthew Braithwaite <matt@braithwaite.net>',
+ "daniel" => 'Daniel Stenberg <daniel@haxx.se>',
+ "mmurphy" => 'Mark Murphy <mmurphy@collab.net>',
+ "cmpilato" => 'C. Michael Pilato <cmpilato@collab.net>',
+ "kevin" => 'Kevin Pilch-Bisson <kevin@pilch-bisson.net>',
+ "philip" => 'Philip Martin <philip@codematters.co.uk>',
+ "jerenkrantz" => 'Justin Erenkrantz <jerenkrantz@apache.org>',
+ "rooneg" => 'Garrett Rooney <rooneg@electricjellyfish.net>',
+ "bcollins" => 'Ben Collins <bcollins@debian.org>',
+ "blair" => 'Blair Zajac <blair@orcaware.com>',
+ "striker" => 'Sander Striker <striker@apache.org>',
+ "XelaRellum" => 'Alexander Mueller <alex@littleblue.de>',
+ "yoshiki" => 'Yoshiki Hayashi <yoshiki@xemacs.org>',
+ "david" => 'David Summers <david@summersoft.fay.ar.us>',
+ "rassilon" => 'Bill Tutt <rassilon@lyra.org>',
+ "kbohling" => 'Kirby C. Bohling <kbohling@birddog.com>',
+ "breser" => 'Ben Reser <ben@reser.org>',
+ "bliss" => 'Tobias Ringstrom <tobias@ringstrom.mine.nu>',
+ "dionisos" => 'Erik Huelsmann <e.huelsmann@gmx.net>',
+ "josander" => 'Jostein Andersen <jostein@josander.net>',
+ "julianfoad" => 'Julian Foad <julianfoad@btopenworld.com>',
+ "clkao" => 'Chia-Liang Kao <clkao@clkao.org>',
+ "xsteve" => 'Stefan Reichör <reichoer@web.de>',
+ "mbk" => 'Mark Benedetto King <mbk@lowlatency.com>',
+ "patrick" => 'Patrick Mayweg <mayweg@qint.de>',
+ "jrepenning" => 'Jack Repenning <jrepenning@collab.net>',
+ "epg" => 'Eric Gillespie <epg@pretzelnet.org>',
+ "dwhedon" => 'David Kimdon <David_Kimdon@alumni.hmc.edu>',
+ "djh" => 'D.J. Heap <dj@shadyvale.net>',
+ "mprice" => 'Michael Price <mprice@atl.lmco.com>',
+ "jszakmeister" => 'John Szakmeister <john@szakmeister.net>',
+ "bdenny" => 'Brian Denny <brian@briandenny.net>',
+ "rey4" => 'Russell Yanofsky <rey4@columbia.edu>',
+ "maxb" => 'Max Bowsher <maxb@ukf.net>',
+ "dlr" => 'Daniel Rall <dlr@finemaltcoding.com>',
+ "jaa" => 'Jani Averbach <jaa@iki.fi>',
+ "pll" => 'Paul Lussier <p.lussier@comcast.net>',
+ "shlomif" => 'Shlomi Fish <shlomif@vipe.technion.ac.il>',
+ "jpieper" => 'Josh Pieper <jpieper@andrew.cmu.edu>',
+ "dimentiy" => 'Dmitriy O. Popkov <dimentiy@dimentiy.info>',
+ "kellin" => 'Shamim Islam <files@poetryunlimited.com>',
+ "sergeyli" => 'Sergey A. Lipnevich <sergey@optimaltec.com>',
+ "kraai" => 'Matt Kraai <kraai@alumni.cmu.edu>',
+ "ballbach" => 'Michael Ballbach <ballbach@rten.net>',
+ "kon" => 'Kalle Olavi Niemitalo <kon@iki.fi>',
+ "knacke" => 'Kai Nacke <kai.nacke@redstar.de>',
+ "gthompson" => 'Glenn A. Thompson <gthompson@cdr.net>',
+ "jespersm" => 'Jesper Steen Møller <jesper@selskabet.org>',
+ "naked" => 'Nuutti Kotivuori <naked@iki.fi>',
+ "niemeyer" => 'Gustavo Niemeyer <niemeyer@conectiva.com>',
+ "trow" => 'Jon Trowbridge <trow@ximian.com>',
+ "mmacek" => 'Marko Macek <Marko.Macek@gmx.net>',
+ "zbrown" => 'Zack Brown <zbrown@tumblerings.org>',
+ "morten" => 'Morten Ludvigsen <morten@2ps.dk>',
+ "fmatias" => 'Féliciano Matias <feliciano.matias@free.fr>',
+ "nsd" => 'Nick Duffek <nick@duffek.com>',
+);
+
+my $parse_next_line = 0;
+my $last_line_empty = 0;
+my $last_rev = "";
+
while (my $entry = <>) {

  # Normalize Windows line endings first; the repository's log messages
  # contain a mix of both styles.
  $entry =~ s/\r\n$/\n/;

  # Strip trailing whitespace down to a bare newline.
  $entry =~ s/\s+$/\n/;

  my $blank = $entry eq "\n";

  # Collapse runs of blank lines into a single one.
  next if $blank and $last_line_empty;

  if ($entry =~ /^-+$/ and length($entry) >= 72) {
    # A full-width dash ruler marks the start of a new log entry, so the
    # following line is the revision header.  (The length test keeps
    # shorter, legitimate dash-only message lines from being mistaken
    # for rulers.)
    $parse_next_line = 1;

    # Make sure the previous entry ended with a blank line.
    print "\n" if $last_rev ne "" and !$last_line_empty;

  } elsif ($parse_next_line) {
    # Revision header, e.g. "r123 | user | 2002-01-01 ... | 2 lines".
    # Splitting on single spaces leaves the interesting fields at
    # offsets 0 (revision), 2 (username) and 4 (timestamp).
    $parse_next_line = 0;

    my @parts = split (/ /, $entry);
    $last_rev  = $parts[0];
    my $hacker = $parts[2];
    my $tstamp = $parts[4];

    # Expand the username to "Name <email>" when we know it.
    $hacker = $hackers{$hacker} if defined $hackers{$hacker};

    printf "%s %s\n", $tstamp, $hacker;

  } elsif ($blank) {
    print "\n";

  } else {
    # Log message body lines are indented, GNU ChangeLog style.
    print "\t$entry";
  }

  $last_line_empty = $blank;
}

# As a HERE doc so it also sets the final changelog's coding
print <<LOCAL;
;; Local Variables:
;; coding: utf-8
;; End:
LOCAL

1;
diff --git a/tools/dev/graph-dav-servers.py b/tools/dev/graph-dav-servers.py
new file mode 100755
index 0000000..86ae475
--- /dev/null
+++ b/tools/dev/graph-dav-servers.py
@@ -0,0 +1,194 @@
+#!/usr/bin/env python
+#
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements. See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership. The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied. See the License for the
+# specific language governing permissions and limitations
+# under the License.
+#
+#
+#
+# graph-svn-dav.py by Brian W. Fitzpatrick <fitz@red-bean.com>
+#
+# This was originally a quick hack to make a pretty picture of svn DAV servers.
+#
+# I've dropped it in Subversion's repository at the request of Karl Fogel.
+#
+# Be warned that this script has many dependencies that don't ship with Python.
+
+import sys
+import os
+import fileinput
+import datetime
+import time
+import datetime
+from matplotlib import dates
+import matplotlib
+matplotlib.use('Agg')
+from matplotlib import pylab
+import Image
+
+OUTPUT_FILE = '../../www/images/svn-dav-securityspace-survey.png'
+OUTPUT_IMAGE_WIDTH = 800
+
+STATS = [
+ ('1/1/2003', 70),
+ ('2/1/2003', 158),
+ ('3/1/2003', 222),
+ ('4/1/2003', 250),
+ ('5/1/2003', 308),
+ ('6/1/2003', 369),
+ ('7/1/2003', 448),
+ ('8/1/2003', 522),
+ ('9/1/2003', 665),
+ ('10/1/2003', 782),
+ ('11/1/2003', 969),
+ ('12/1/2003', 1009),
+ ('1/1/2004', 1162),
+ ('2/1/2004', 1307),
+ ('3/1/2004', 1424),
+ ('4/1/2004', 1792),
+ ('5/1/2004', 2113),
+ ('6/1/2004', 2502),
+ ('7/1/2004', 2941),
+ ('8/1/2004', 3863),
+ ('9/1/2004', 4174),
+ ('10/1/2004', 4187),
+ ('11/1/2004', 4783),
+ ('12/1/2004', 4995),
+ ('1/1/2005', 5565),
+ ('2/1/2005', 6505),
+ ('3/1/2005', 7897),
+ ('4/1/2005', 8751),
+ ('5/1/2005', 9793),
+ ('6/1/2005', 11534),
+ ('7/1/2005', 12808),
+ ('8/1/2005', 13545),
+ ('9/1/2005', 15233),
+ ('10/1/2005', 17588),
+ ('11/1/2005', 18893),
+ ('12/1/2005', 20278),
+ ('1/1/2006', 21084),
+ ('2/1/2006', 23861),
+ ('3/1/2006', 26540),
+ ('4/1/2006', 29396),
+ ('5/1/2006', 33001),
+ ('6/1/2006', 35082),
+ ('7/1/2006', 38939),
+ ('8/1/2006', 40672),
+ ('9/1/2006', 46525),
+ ('10/1/2006', 54247),
+ ('11/1/2006', 63145),
+ ('12/1/2006', 68988),
+ ('1/1/2007', 77027),
+ ('2/1/2007', 84813),
+ ('3/1/2007', 95679),
+ ('4/1/2007', 103852),
+ ('5/1/2007', 117267),
+ ('6/1/2007', 133665),
+ ('7/1/2007', 137575),
+ ('8/1/2007', 155426),
+ ('9/1/2007', 159055),
+ ('10/1/2007', 169939),
+ ('11/1/2007', 180831),
+ ('12/1/2007', 187093),
+ ('1/1/2008', 199432),
+ ('2/1/2008', 221547),
+ ('3/1/2008', 240794),
+ ('4/1/2008', 255520),
+ ('5/1/2008', 269478),
+ ('6/1/2008', 286614),
+ ('7/1/2008', 294579),
+ ('8/1/2008', 307923),
+ ('9/1/2008', 254757),
+ ('10/1/2008', 268081),
+ ('11/1/2008', 299071),
+ ('12/1/2008', 330884),
+ ('1/1/2009', 369719),
+ ('2/1/2009', 378434),
+ ('3/1/2009', 390502),
+ ('4/1/2009', 408658),
+ ('5/1/2009', 407044),
+ ('6/1/2009', 406520),
+ ('7/1/2009', 334276),
+ ]
+
+
def get_date(raw_date):
    """Parse an 'M/D/YYYY' string from STATS into a datetime."""
    month, day, year = (int(part) for part in raw_date.split('/'))
    return datetime.datetime(year, month, day)
+
+
def get_ordinal_date(date):
    # Truncating to an integral ordinal is the only way I can get
    # matplotlib to handle the dates correctly.
    ordinal = dates.date2num(get_date(date))
    return int(ordinal)
+
+
def load_stats():
    """Split STATS into parallel lists of ordinal dates and server counts."""
    # 'ordinals' avoids shadowing the matplotlib 'dates' module imported above.
    ordinals = [get_ordinal_date(when) for when, _ in STATS]
    counts = [pair[1] for pair in STATS]

    return ordinals, counts
+
+
def draw_graph(dates, counts):
    """Plot counts over dates and write a width-limited PNG to OUTPUT_FILE.

    Renders the survey data with matplotlib, saves the full-size image,
    then uses PIL to scale it down to OUTPUT_IMAGE_WIDTH pixels wide.
    """
    ###########################################################
    # Drawing takes place here.
    pylab.figure(1)

    ax = pylab.subplot(111)
    pylab.plot_date(dates, counts,
                    color='r', linestyle='-', marker='o', markersize=3)

    ax.xaxis.set_major_formatter( pylab.DateFormatter('%Y') )
    ax.xaxis.set_major_locator( pylab.YearLocator() )
    ax.xaxis.set_minor_locator( pylab.MonthLocator() )
    # pad the x range by ~3 months on either side
    ax.set_xlim( (dates[0] - 92, dates[len(dates) - 1] + 92) )

    ax.yaxis.set_major_formatter( pylab.FormatStrFormatter('%d') )

    pylab.ylabel('Total # of Public DAV Servers')

    lastdate = datetime.datetime.fromordinal(dates[len(dates) - 1]).strftime("%B %Y")
    pylab.xlabel("Data as of " + lastdate)
    pylab.title('Security Space Survey of\nPublic Subversion DAV Servers')
    # End drawing
    ###########################################################

    # BUG FIX: PNG is binary data, so the file must be opened in 'wb';
    # 'w' (text mode) corrupts the output on Windows and raises a
    # TypeError under Python 3.
    png = open(OUTPUT_FILE, 'wb')
    pylab.savefig(png)
    png.close()

    # Park the full-size rendering under a temporary name, resize it back
    # into place; on any failure restore the original and re-raise.
    os.rename(OUTPUT_FILE, OUTPUT_FILE + ".tmp.png")
    try:
        im = Image.open(OUTPUT_FILE + ".tmp.png", 'r')
        (width, height) = im.size
        print("Original size: %d x %d pixels" % (width, height))
        scale = float(OUTPUT_IMAGE_WIDTH) / float(width)
        width = OUTPUT_IMAGE_WIDTH
        height = int(float(height) * scale)
        print("Final size: %d x %d pixels" % (width, height))
        im = im.resize((width, height), Image.ANTIALIAS)
        im.save(OUTPUT_FILE, im.format)
        os.unlink(OUTPUT_FILE + ".tmp.png")
    except Exception as e:
        sys.stderr.write("Error attempting to resize the graphic: %s\n" % (str(e)))
        os.rename(OUTPUT_FILE + ".tmp.png", OUTPUT_FILE)
        raise
    pylab.close()
+
+
+if __name__ == '__main__':
+ dates, counts = load_stats()
+ draw_graph(dates, counts)
+ print("Don't forget to update ../../www/svn-dav-securityspace-survey.html!")
diff --git a/tools/dev/histogram.py b/tools/dev/histogram.py
new file mode 100755
index 0000000..1923c78
--- /dev/null
+++ b/tools/dev/histogram.py
@@ -0,0 +1,54 @@
+#!/usr/bin/env python
+#
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements. See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership. The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied. See the License for the
+# specific language governing permissions and limitations
+# under the License.
+#
+
+import sys
+import operator
+
+
def count(infile):
    """Tally how many times each author appears in *infile*.

    *infile* is any iterable of lines (e.g. an open file) with one author
    name per line.  Returns a mapping of author -> occurrence count.
    """
    # collections.Counter does this tally in C; imported locally so the
    # script's top-level imports stay untouched.
    from collections import Counter
    return Counter(line.strip() for line in infile)
+
+
def histogram(counts, width):
    """Print a horizontal bar chart of author counts, widest first."""
    max_len = max(len(name) for name in counts)
    max_count = max(counts.values())

    # scale factor so the longest bar fits in the remaining columns
    adjustor = float(max_count) / (width - max_len - 3)

    ordered = sorted(counts.items(),
                     key=operator.itemgetter(1),  # sort on count
                     reverse=True)
    for name, tally in ordered:
        bar = "X" * int(tally / adjustor)
        print("%-*s | %s" % (max_len, name, bar))
+
+
+if __name__ == '__main__':
+ if len(sys.argv) < 2:
+ ### TODO: Automagically determine terminal width
+ width = 80
+ else:
+ width = int(sys.argv[1])
+ histogram(count(sys.stdin), width)
diff --git a/tools/dev/iz/defect.dem b/tools/dev/iz/defect.dem
new file mode 100644
index 0000000..7756b7c
--- /dev/null
+++ b/tools/dev/iz/defect.dem
@@ -0,0 +1,6 @@
+set title "Subversion DEFECT Activity"
+set boxwidth 0.5
+set data style lines
+set key 10, 60
+plot "/tmp/points.found.DEFECT" title "found" with boxes, "/tmp/points.fixed.DEFECT" title "fixed" with boxes, "/tmp/points.avg.DEFECT" title "moving avg", "/tmp/points.open.DEFECT" title "open"
+pause -1 "Hit return to continue"
diff --git a/tools/dev/iz/ff2csv.command b/tools/dev/iz/ff2csv.command
new file mode 100755
index 0000000..6826e34
--- /dev/null
+++ b/tools/dev/iz/ff2csv.command
@@ -0,0 +1,27 @@
#!/bin/sh

# MacOS X doohickey to run ff2csv.py, with parameters, by double-click.

flags="hq"

Usage () {
    args="$*"
    if [[ -n "$args" ]] ; then
        echo >&2 "$args"
    fi
    echo >&2 "Usage: $0 [-$flags] [querysetfile [csvfile]]
Run ff2csv.py, fetching and summarizing SVN bug status."
}

while getopts $flags flag; do
    case "$flag" in
        h|q) Usage; exit 0;;
    esac
done

# Run from the folder containing this script rather than the user's home
# folder (Finder launches .command files from $HOME).
cd `dirname $0`

date=`date +%m%d`
./ff2csv.py ${1:-query-set-1-$date.tsv} ${2:-core-history-$date.csv}
diff --git a/tools/dev/iz/ff2csv.py b/tools/dev/iz/ff2csv.py
new file mode 100755
index 0000000..dca127e
--- /dev/null
+++ b/tools/dev/iz/ff2csv.py
@@ -0,0 +1,189 @@
+#!/usr/bin/env python
+#
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements. See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership. The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied. See the License for the
+# specific language governing permissions and limitations
+# under the License.
+#
+#
+# -*- Python -*-
+"""Transform find-fix.py output into Excellable csv."""
+
+__date__ = "Time-stamp: <2003-10-16 13:26:27 jrepenning>"[13:30]
+__author__ = "Jack Repenning <jrepenning@collab.net>"
+
+import getopt
+try:
+ my_getopt = getopt.gnu_getopt
+except AttributeError:
+ my_getopt = getopt.getopt
+import inspect
+import os
+import os.path
+import pydoc
+import re
+import shutil
+import string
+import sys
+import time
+
+# Long options and their usage strings; "=" means it takes an argument.
+# To get a list suitable for getopt, just do
+#
+# [x[0] for x in long_opts]
+#
+# Make sure to sacrifice a lamb to Guido for each element of the list.
+long_opts = [
+ ["doc", """Optional, print pydocs."""],
+ ["help", """Optional, print usage (this text)."""],
+ ["verbose", """Optional, print more progress messages."""],
+ ]
+
+help = 0
+verbose = 0
+me = os.path.basename(sys.argv[0])
+
+DATA_FILE = "http://subversion.tigris.org/iz-data/query-set-1.tsv"
+
def main():
    """Run find-fix.py with arguments du jour for drawing pretty
manager-speak pictures."""

    global verbose

    try:
        opts, args = my_getopt(sys.argv[1:], "", [x[0] for x in long_opts])
    except getopt.GetoptError as e:
        print("Error: %s" % e.msg)
        shortusage()
        print(me + " --help for options.")
        sys.exit(1)

    for opt, arg in opts:
        if opt == "--help":
            usage()
            sys.exit(0)
        elif opt == "--verbose":
            verbose = 1
        elif opt == "--doc":
            pydoc.doc(pydoc.importfile(sys.argv[0]))
            sys.exit(0)

    # do something fruitful with your life
    if len(args) == 0:
        args = ["query-set-1.tsv", "core-history.csv"]
        # BUG FIX: %-formatting needs a tuple; passing the list itself
        # raised "not enough arguments for format string".
        print("ff2csv %s %s" % tuple(args))

    if len(args) != 2:
        print("%s: Wrong number of args." % me)
        shortusage()
        sys.exit(1)

    # refresh the raw data, preferring curl, falling back to wget
    if os.system("curl " + DATA_FILE + "> " + args[0]):
        os.system("wget " + DATA_FILE)

    outfile = open(args[1], "w")
    outfile.write("Date,found,fixed,inval,dup,other,remain\n")

    totalsre = re.compile("totals:.*found= +([0-9]+) +"
                          "fixed= +([0-9]+) +"
                          "inval= +([0-9]+) +"
                          "dup= +([0-9]+) +"
                          "other= +([0-9]+) +"
                          "remain= *([0-9]+)")
    for year in ("2001", "2002", "2003", "2004"):
        for month in ("01", "02", "03", "04", "05", "06", "07", "08",
                      "09", "10", "11", "12"):
            for dayrange in (("01", "08"),
                             ("08", "15"),
                             ("15", "22"),
                             ("22", "28")):
                if verbose:
                    print("searching %s-%s-%s to %s" % (year,
                                                        month,
                                                        dayrange[0],
                                                        dayrange[1]))
                ffpy = os.popen("python ./find-fix.py --m=beta "
                                "%s %s-%s-%s %s-%s-%s"
                                % (args[0],
                                   year, month, dayrange[0],
                                   year, month, dayrange[1]))
                if verbose:
                    print("ffpy: %s" % ffpy)

                # scan find-fix.py's output for its "totals:" line
                line = ffpy.readline()
                if verbose:
                    print("initial line is: %s" % line)
                matches = totalsre.search(line)
                if verbose:
                    print("initial match is: %s" % matches)
                while line and not matches:
                    line = ffpy.readline()
                    if verbose:
                        print("%s: read line '%s'" % (me, line))
                    matches = totalsre.search(line)
                    if verbose:
                        print("subsequent line is: %s" % line)

                ffpy.close()

                if verbose:
                    print("line is %s" % line)

                if matches is None:
                    # BUG FIX: no totals line at all used to crash with
                    # AttributeError on None.group(); skip this period.
                    continue

                if (matches.group(1) != "0"
                        or matches.group(2) != "0"
                        or matches.group(3) != "0"
                        or matches.group(4) != "0"
                        or matches.group(5) != "0"):

                    outfile.write("%s-%s-%s,%s,%s,%s,%s,%s,%s\n"
                                  % (year, month, dayrange[1],
                                     matches.group(1),
                                     matches.group(2),
                                     matches.group(3),
                                     matches.group(4),
                                     matches.group(5),
                                     matches.group(6),
                                     ))
                elif matches.group(6) != "0":
                    # quit at first nothing-done week
                    # allows slop in loop controls
                    break
    outfile.close()
+
+
def shortusage():
    "Print one-line usage summary."
    synopsis = pydoc.synopsis(sys.argv[0])
    print("%s - %s" % (me, synopsis))
+
def usage():
    "Print multi-line usage tome."
    shortusage()
    print('''%s [opts] [queryfile [outfile]]
Option keywords may be abbreviated to any unique prefix.
Option order is not important.
Most options require "=xxx" arguments:''' % me)
    for name, description in long_opts:
        # value-taking options are listed without the trailing "="
        if name[-1:] == '=':
            sys.stdout.write(" --%s " % name[:-1])
            pad = 19
        else:
            sys.stdout.write(" --%s " % name)
            pad = 18
        print("%s %s" % ((' ' * (pad - len(name))), description))
+
+if __name__ == "__main__":
+ main()
diff --git a/tools/dev/iz/find-fix.py b/tools/dev/iz/find-fix.py
new file mode 100755
index 0000000..8761b8e
--- /dev/null
+++ b/tools/dev/iz/find-fix.py
@@ -0,0 +1,454 @@
+#!/usr/bin/env python
+#
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements. See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership. The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied. See the License for the
+# specific language governing permissions and limitations
+# under the License.
+#
+#
+# -*- Python -*-
+"""find-fix.py: produce a find/fix report for Subversion's IZ database
+
+For simple text summary:
+ find-fix.py query-set-1.tsv YYYY-MM-DD YYYY-MM-DD
+Statistics will be printed for bugs found or fixed within the
+time frame.
+
+For gnuplot presentation:
+ find-fix.py query-set-1.tsv outfile
+Gnuplot provides its own way to select date ranges.
+
+Either way, get a query-set-1.tsv from:
+ http://subversion.tigris.org/iz-data/query-set-1.tsv (updated nightly)
+See http://subversion.tigris.org/iz-data/README for more info on that file.
+
+For more usage info on this script:
+ find-fix.py --help
+"""
+
+_version = "$Revision:"
+
+#
+# This can be run over the data file found at:
+# http://subversion.tigris.org/iz-data/query-set-1.tsv
+#
+
+import getopt
+try:
+ my_getopt = getopt.gnu_getopt
+except AttributeError:
+ my_getopt = getopt.getopt
+import operator
+import os
+import os.path
+import pydoc
+import re
+try:
+ # Python >=2.6
+ from functools import reduce
+except ImportError:
+ # Python <2.6
+ pass
+import sys
+import time
+
+me = os.path.basename(sys.argv[0])
+
+# Long options and their usage strings; "=" means it takes an argument.
+# To get a list suitable for getopt, just do
+#
+# [x[0] for x in long_opts]
+#
+# Make sure to sacrifice a lamb to Guido for each element of the list.
+long_opts = [
+ ["milestones=", """Optional, milestones NOT to report on
+ (one or more of Beta, 1.0, Post-1.0, cvs2svn-1.0, cvs2svn-opt,
+ inapplicable)"""],
+ ["update", """Optional, update the statistics first."""],
+ ["doc", """Optional, print pydocs."""],
+ ["help", """Optional, print usage (this text)."""],
+ ["verbose", """Optional, print more progress messages."""],
+ ]
+
+help = 0
+verbose = 0
+update = 0
+
+DATA_FILE = "http://subversion.tigris.org/iz-data/query-set-1.tsv"
+ONE_WEEK = 7 * 24 * 60 * 60
+
+_types = []
+_milestone_filter = []
+
+noncore_milestone_filter = [
+ 'Post-1.0',
+ '1.1',
+ 'cvs2svn-1.0',
+ 'cvs2svn-opt',
+ 'inapplicable',
+ 'no milestone',
+ ]
+
+one_point_oh_milestone_filter = noncore_milestone_filter + []
+
+beta_milestone_filter = one_point_oh_milestone_filter + ['1.0']
+
+
+_types = [
+ 'DEFECT',
+ 'TASK',
+ 'FEATURE',
+ 'ENHANCEMENT',
+ 'PATCH',
+ ]
+
+
def _update_datafile(target):
    """Refresh *target* from DATA_FILE, preferring curl, falling back to wget."""
    if os.system("curl " + DATA_FILE + "> " + target):
        os.system("wget " + DATA_FILE)


def main():
    """Report bug find/fix rate statistics for Subversion."""

    global verbose
    global update
    global _types
    global _milestone_filter
    global noncore_milestone_filter

    try:
        opts, args = my_getopt(sys.argv[1:], "", [x[0] for x in long_opts])
    except getopt.GetoptError as e:
        sys.stderr.write("Error: %s\n" % e.msg)
        shortusage()
        sys.stderr.write("%s --help for options.\n" % me)
        sys.exit(1)

    for opt, arg in opts:
        if opt == "--help":
            usage()
            sys.exit(0)
        elif opt == "--verbose":
            verbose = 1
        elif opt == "--milestones":
            # Build the exclusion list: "noncore"/"beta"/"one" select
            # canned lists, "-name" removes an entry, anything else is
            # appended verbatim.
            for mstone in arg.split(","):
                if mstone == "noncore":
                    _milestone_filter = noncore_milestone_filter
                elif mstone == "beta":
                    _milestone_filter = beta_milestone_filter
                elif mstone == "one":
                    _milestone_filter = one_point_oh_milestone_filter
                elif mstone[0] == '-':
                    if mstone[1:] in _milestone_filter:
                        spot = _milestone_filter.index(mstone[1:])
                        _milestone_filter = _milestone_filter[:spot] \
                                            + _milestone_filter[(spot+1):]
                else:
                    _milestone_filter += [mstone]

        elif opt == "--update":
            update = 1
        elif opt == "--doc":
            pydoc.doc(pydoc.importfile(sys.argv[0]))
            sys.exit(0)

    if len(_milestone_filter) == 0:
        _milestone_filter = noncore_milestone_filter

    if verbose:
        sys.stderr.write("%s: Filtering out milestones %s.\n"
                         % (me, ", ".join(_milestone_filter)))

    if len(args) == 2:
        # two positional args -> gnuplot data mode
        if verbose:
            sys.stderr.write("%s: Generating gnuplot data.\n" % me)
        if update:
            if verbose:
                sys.stderr.write("%s: Updating %s from %s.\n" % (me, args[0], DATA_FILE))
            _update_datafile(args[0])
        plot(args[0], args[1])

    elif len(args) == 3:
        # three positional args -> text summary over a date range
        if verbose:
            sys.stderr.write("%s: Generating summary from %s to %s.\n"
                             % (me, args[1], args[2]))
        if update:
            if verbose:
                sys.stderr.write("%s: Updating %s from %s.\n" % (me, args[0], DATA_FILE))
            _update_datafile(args[0])

        try:
            t_start = parse_time(args[1] + " 00:00:00")
        except ValueError:
            sys.stderr.write('%s: ERROR: bad time value: %s\n' % (me, args[1]))
            sys.exit(1)

        try:
            t_end = parse_time(args[2] + " 00:00:00")
        except ValueError:
            sys.stderr.write('%s: ERROR: bad time value: %s\n' % (me, args[2]))
            sys.exit(1)

        summary(args[0], t_start, t_end)
    else:
        usage()

    sys.exit(0)
+
+
def summary(datafile, d_start, d_end):
    "Prints a summary of activity within a specified date range."

    data = load_data(datafile)

    # activity during the requested period
    found, fixed, inval, dup, other = extract(data, 1, d_start, d_end)

    # activity from the beginning of time to the end of the request,
    # used to compute the number of issues still remaining.
    # XXX It would be faster to change extract to collect this in one
    # pass, but we don't presently have enough data to justify that.
    fz_found, fz_fixed, fz_inval, fz_dup, fz_other \
              = extract(data, 1, 0, d_end)

    totals = [0, 0, 0, 0, 0, 0]
    for t in _types:
        remain = fz_found[t] \
                 - (fz_fixed[t] + fz_inval[t] + fz_dup[t] + fz_other[t])
        row = (found[t], fixed[t], inval[t], dup[t], other[t], remain)
        print('%12s: found=%3d fixed=%3d inval=%3d dup=%3d '
              'other=%3d remain=%3d'
              % ((t,) + row))
        totals = [a + b for a, b in zip(totals, row)]

    print('-' * 77)
    print('%12s: found=%3d fixed=%3d inval=%3d dup=%3d '
          'other=%3d remain=%3d'
          % tuple(['totals'] + totals))
+
+
def plot(datafile, outbase):
    "Generates data files intended for use by gnuplot."

    global _types

    data = load_data(datafile)

    # Find the earliest creation time in the data set.
    # BUG FIX: this was "1L<<32", a Python 2 long literal that is a
    # syntax error on Python 3 (the rest of the file uses py3 syntax).
    t_min = 1 << 32
    for issue in data:
        if issue.created < t_min:
            t_min = issue.created

    # break the time up into a tuple, then back up to Sunday
    t_start = time.localtime(t_min)
    t_start = time.mktime((t_start[0], t_start[1], t_start[2] - t_start[6] - 1,
                           0, 0, 0, 0, 0, 0))

    plots = { }
    for t in _types:
        # for each issue type, we will record per-week stats, compute a moving
        # average of the find/fix delta, and track the number of open issues
        plots[t] = [ [ ], MovingAverage(), 0 ]

    week = 0
    # BUG FIX: mktime() returns a float, which range() rejects on
    # Python 3; iterate explicitly instead.
    date = t_start
    now = time.time()
    while date < now:
        ### this is quite inefficient, as we could just sort by date, but
        ### I'm being lazy
        found, fixed = extract(data, None, date, date + ONE_WEEK - 1)

        for t in _types:
            per_week, avg, open_issues = plots[t]
            delta = found[t] - fixed[t]
            per_week.append((week, date,
                             found[t], -fixed[t], avg.add(delta), open_issues))
            plots[t][2] = open_issues + delta

        week = week + 1
        date = date + ONE_WEEK

    for t in _types:
        week_data = plots[t][0]
        write_file(week_data, outbase, t, 'found', 2)
        write_file(week_data, outbase, t, 'fixed', 3)
        write_file(week_data, outbase, t, 'avg', 4)
        write_file(week_data, outbase, t, 'open', 5)
+
def write_file(week_data, base, type, tag, idx):
    """Write one gnuplot data series to '<base>.<tag>.<type>'.

    Each line is "<week> <value> # <human-readable date>", where the
    value is column *idx* of the per-week tuple.
    """
    # BUG FIX: use a context manager so the handle is flushed and closed
    # (the original leaked the open file).
    with open('%s.%s.%s' % (base, tag, type), 'w') as f:
        for info in week_data:
            f.write('%s %s # %s\n' % (info[0], info[idx], time.ctime(info[1])))
+
+
class MovingAverage:
    "Helper class to compute moving averages."

    def __init__(self, n=4):
        # keep the last n contributions, each pre-scaled by 1/n
        self.n = n
        self.data = [0] * n

    def add(self, value):
        # slide the window: drop the oldest contribution, append the newest
        self.data.pop(0)
        self.data.append(float(value) / self.n)
        return self.avg()

    def avg(self):
        # the sum of the pre-scaled contributions is the moving average
        return sum(self.data)
+
+
def extract(data, details, d_start, d_end):
    """Extract found/fixed counts for each issue type within the date range.

    If DETAILS is false, return two dictionaries:

        found, fixed

    ...each mapping issue types to the number of issues of that type
    found or fixed respectively.

    If DETAILS is true, return five dictionaries:

        found, fixed, invalid, duplicate, other

    The first is still the found issues, but the other four break down
    the resolution into 'FIXED', 'INVALID', 'DUPLICATE', and a grab-bag
    category for 'WORKSFORME', 'LATER', 'REMIND', and 'WONTFIX'."""

    global _types
    global _milestone_filter

    found = { }
    fixed = { }
    invalid = { }
    duplicate = { }
    other = { }  # "WORKSFORME", "LATER", "REMIND", and "WONTFIX"

    for t in _types:
        found[t] = fixed[t] = invalid[t] = duplicate[t] = other[t] = 0

    for issue in data:
        # skip issues on milestones we were told to ignore
        if issue.milestone in _milestone_filter:
            continue

        # tally issues opened inside the window
        if d_start <= issue.created <= d_end:
            found[issue.type] += 1

        # tally resolutions inside the window
        if not (d_start <= issue.resolved <= d_end):
            continue
        if not details:
            fixed[issue.type] += 1
        elif issue.resolution == "FIXED":
            fixed[issue.type] += 1
        elif issue.resolution == "INVALID":
            invalid[issue.type] += 1
        elif issue.resolution == "DUPLICATE":
            duplicate[issue.type] += 1
        else:
            other[issue.type] += 1

    if details:
        return found, fixed, invalid, duplicate, other
    return found, fixed
+
+
def load_data(datafile):
    "Return a list of Issue objects for the specified data."
    # BUG FIX: read via a context manager so the file handle is closed
    # instead of leaked.
    with open(datafile) as fp:
        return [Issue(line) for line in fp.readlines()]
+
+
class Issue:
    "Represents a single issue from the exported IssueZilla data."

    def __init__(self, line):
        # tab-separated export row; indexing (rather than unpacking)
        # tolerates any trailing extra columns
        fields = line.strip().split('\t')

        self.id = int(fields[0])
        self.type = fields[1]
        self.reporter = fields[2]
        # an unassigned issue is exported as the literal string 'NULL'
        self.assigned = None if fields[3] == 'NULL' else fields[3]
        self.milestone = fields[4]
        self.created = parse_time(fields[5])
        self.resolution = fields[7]
        if self.resolution:
            self.resolved = parse_time(fields[6])
        else:
            # A reopened issue still shows "RESOLVED" activity (and hence
            # a resolved date); with an empty resolution we deliberately
            # ignore that date and treat the issue as unresolved.
            self.resolved = None
        self.summary = fields[8]
+
+
parse_time_re = re.compile('([0-9]{4})-([0-9]{2})-([0-9]{2}) '
                           '([0-9]{2}):([0-9]{2}):([0-9]{2})')

def parse_time(t):
    """Convert an exported MySQL timestamp into seconds since the epoch.

    Returns None for the literal 'NULL'; on any unparsable value, prints
    an error and exits with status 1.
    """
    if t == 'NULL':
        return None
    matches = parse_time_re.match(t)
    try:
        if matches is None:
            # BUG FIX: a non-matching string used to crash with an
            # AttributeError on None.group(); route it through the same
            # "bad time value" error path as out-of-range fields.
            raise ValueError(t)
        return time.mktime((int(matches.group(1)),
                            int(matches.group(2)),
                            int(matches.group(3)),
                            int(matches.group(4)),
                            int(matches.group(5)),
                            int(matches.group(6)),
                            0, 0, -1))
    except ValueError:
        sys.stderr.write('ERROR: bad time value: %s\n'% t)
        sys.exit(1)
+
def shortusage():
    # one-line synopsis followed by the canned invocation examples
    print(pydoc.synopsis(sys.argv[0]))
    print("""
For simple text summary:
  find-fix.py [options] query-set-1.tsv YYYY-MM-DD YYYY-MM-DD

For gnuplot presentation:
  find-fix.py [options] query-set-1.tsv outfile
""")
+
def usage():
    "Print the full usage message: synopsis plus option descriptions."
    shortusage()
    for name, description in long_opts:
        pad = 18
        # value-taking options are listed without the trailing "="
        if name[-1:] == '=':
            sys.stdout.write(" --%s " % name[:-1])
            pad = 19
        else:
            sys.stdout.write(" --%s " % name)
        print("%s %s" % ((' ' * (pad - len(name))), description))
    print('''
Option keywords may be abbreviated to any unique prefix.
Most options require "=xxx" arguments.
Option order is not important.''')
+
+if __name__ == '__main__':
+ main()
diff --git a/tools/dev/iz/run-queries.sh b/tools/dev/iz/run-queries.sh
new file mode 100755
index 0000000..990caf5
--- /dev/null
+++ b/tools/dev/iz/run-queries.sh
@@ -0,0 +1,62 @@
+#!/bin/sh
+#
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied.  See the License for the
+# specific language governing permissions and limitations
+# under the License.
+#
+#
+
+# Export two issue-tracker query result sets as TSV files into iz-data/,
+# for consumption by find-fix.py and friends.
+
+if test $# != 3; then
+  echo "USAGE: $0 DATABASE_USER DATABASE_PASSWORD MYSQL_DATABASE"
+  exit 1
+fi
+
+dbuser="$1"
+dbpass="$2"
+dbdb="$3"
+
+q1='select issues.issue_id, issue_type, user1.LOGIN_NAME "reporter",
+ user2.LOGIN_NAME "assigned_to", target_milestone, creation_ts,
+ max(issue_when) "resolved_ts", resolution, short_desc
+ from issues left join issues_activity
+ on issues.issue_id=issues_activity.issue_id and newvalue="RESOLVED",
+ profiles prof1,
+ profiles prof2 left join tigris.HELM_USER user1
+ on user1.USER_ID=prof1.helm_user_id
+ left join tigris.HELM_USER user2
+ on user2.USER_ID=prof2.helm_user_id
+ where prof1.userid=reporter and prof2.userid=assigned_to
+ group by issues.issue_id
+ order by issues.issue_id'
+
+q2='select issues.issue_id, issue_type, user1.LOGIN_NAME "reporter",
+ user2.LOGIN_NAME "assigned_to", target_milestone, creation_ts,
+ max(issue_when) "resolved_ts", resolution, short_desc,
+ priority
+ from issues left join issues_activity
+ on issues.issue_id=issues_activity.issue_id and newvalue="RESOLVED",
+ profiles prof1,
+ profiles prof2 left join tigris.HELM_USER user1
+ on user1.USER_ID=prof1.helm_user_id
+ left join tigris.HELM_USER user2
+ on user2.USER_ID=prof2.helm_user_id
+ where prof1.userid=reporter and prof2.userid=assigned_to
+ group by issues.issue_id
+ order by issues.issue_id'
+
+# Quote the credentials so a password containing spaces or shell
+# metacharacters survives word splitting and globbing.
+mysql --batch -e "use $dbdb; $q1" --user="$dbuser" --password="$dbpass" --silent > iz-data/query-set-1.tsv
+mysql --batch -e "use $dbdb; $q2" --user="$dbuser" --password="$dbpass" --silent > iz-data/query-set-2.tsv
diff --git a/tools/dev/lock-check.py b/tools/dev/lock-check.py
new file mode 100755
index 0000000..710bf48
--- /dev/null
+++ b/tools/dev/lock-check.py
@@ -0,0 +1,114 @@
+#!/usr/bin/env python
+#
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements. See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership. The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied. See the License for the
+# specific language governing permissions and limitations
+# under the License.
+#
+#
+
+### Repository lock checker. Gets an exclusive lock on the provided
+### repository, then runs db_stat to see if the lock counts have been
+### reset to 0. If not, prints the timestamp of the run and a message
+### about accumulation.
+
+DB_STAT = 'db_stat'
+
+
+import sys
+import os
+import os.path
+import time
+import fcntl
+import getopt
+try:
+ my_getopt = getopt.gnu_getopt
+except AttributeError:
+ my_getopt = getopt.getopt
+
+def usage_and_exit(retval):
+  """Print a usage message and exit the process with status RETVAL.
+
+  The message goes to stderr when RETVAL is non-zero (error path),
+  otherwise to stdout (--help path).
+  """
+  if retval:
+    out = sys.stderr
+  else:
+    out = sys.stdout
+  out.write("""Usage: %s [OPTIONS] REPOS-PATH
+
+Options:
+ --help (-h) : Show this usage message
+ --non-blocking : Don't wait for a lock that can't be immediately obtained
+
+Obtain an exclusive lock (waiting for one unless --non-blocking is
+passed) on REPOS-PATH, then check its lock usage counts. If there is
+any accumulation present, report that accumulation to stdout.
+""" % (os.path.basename(sys.argv[0])))
+  sys.exit(retval)
+
+def main():
+ now_time = time.asctime()
+ repos_path = None
+ nonblocking = 0
+
+ # Parse the options.
+ optlist, args = my_getopt(sys.argv[1:], "h", ['non-blocking', 'help'])
+ for opt, arg in optlist:
+ if opt == '--help' or opt == '-h':
+ usage_and_exit(0)
+ if opt == '--non-blocking':
+ nonblocking = 1
+ else:
+ usage_and_exit(1)
+
+ # We need at least a path to work with, here.
+ argc = len(args)
+ if argc < 1 or argc > 1:
+ usage_and_exit(1)
+ repos_path = args[0]
+
+ fd = open(os.path.join(repos_path, 'locks', 'db.lock'), 'a')
+ try:
+ # Get an exclusive lock on the repository lock file, but maybe
+ # don't wait for it.
+ try:
+ mode = fcntl.LOCK_EX
+ if nonblocking:
+ mode = mode | fcntl.LOCK_NB
+ fcntl.lockf(fd, mode)
+ except IOError:
+ sys.stderr.write("Error obtaining exclusive lock.\n")
+ sys.exit(1)
+
+ # Grab the db_stat results.
+ lines = os.popen('%s -ch %s' % (DB_STAT, os.path.join(repos_path, 'db')))
+ log_lines = []
+ for line in lines:
+ pieces = line.split('\t')
+ if (pieces[1].find('current lock') != -1) and (int(pieces[0]) > 0):
+ log = ''
+ if not len(log_lines):
+ log = log + "[%s] Lock accumulation for '%s'\n" \
+ % (now_time, repos_path)
+ log = log + ' ' * 27
+ log = log + "%s\t%s" % (pieces[0], pieces[1])
+ log_lines.append(log)
+ if len(log_lines):
+ sys.stdout.write(''.join(log_lines))
+ finally:
+ # Unlock the lockfile
+ fcntl.lockf(fd, fcntl.LOCK_UN)
+ fd.close()
+
+if __name__ == "__main__":
+ main()
diff --git a/tools/dev/log_revnum_change_asf.py b/tools/dev/log_revnum_change_asf.py
new file mode 100755
index 0000000..30f5507
--- /dev/null
+++ b/tools/dev/log_revnum_change_asf.py
@@ -0,0 +1,97 @@
+#!/usr/bin/env python
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements. See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership. The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied. See the License for the
+# specific language governing permissions and limitations
+# under the License.
+
+"""
+Script to change old (svn.collab.net) revision numbers in subversion log
+messages to new ASF subversion repository revision numbers.
+"""
+
+USAGE = """python log_revnum_change_asf.py [OPTION]... URL
+
+Change the revision numbers relatively in the log messages of new ASF
+subversion repository.
+"""
+
+from csvn.repos import RemoteRepository
+from csvn.auth import User
+import csvn.core
+from optparse import OptionParser
+import sys
+import re
+
+def repl_newrev(matchobj):
+ """
+ Revision to be substituted is provided here.
+ """
+ if matchobj.group(0):
+ old_rev = int(matchobj.group(0)[1:])
+ if old_rev <= 45000:
+ return 'r'+str(old_rev + 840074)
+ else:
+ return 'r'+str(old_rev)
+
+def main():
+  """
+  Script execution starts here.
+  """
+
+  parser = OptionParser(usage=USAGE)
+  parser.add_option("-u", "", dest="username",
+                    help="commit the changes as USERNAME")
+  parser.add_option("-p", "", dest="password",
+                    help="commit the changes with PASSWORD")
+  parser.add_option("-r", "", dest="rev",
+                    help="revision range")
+
+  (options, args) = parser.parse_args()
+
+  # Exactly one positional argument (the repository URL) is required.
+  if len(args) != 1:
+    parser.print_help()
+    sys.exit(1)
+
+  csvn.core.svn_cmdline_init("", csvn.core.stderr)
+  repos_url = args[0]
+  # -r accepts a single revision or a START:END range; default is 1:HEAD.
+  revs = options.rev
+  if revs and ":" in revs:
+    [start_rev, end_rev] = revs.split(":")
+  elif revs:
+    start_rev = revs
+    end_rev = revs
+  else:
+    start_rev = 1
+    end_rev = "HEAD"
+
+  session = RemoteRepository(repos_url, user=User(options.username,
+                                                  options.password))
+
+  # Resolve the symbolic HEAD endpoint(s) to concrete revision numbers.
+  if end_rev == "HEAD":
+    end_rev = session.latest_revnum()
+  if start_rev == "HEAD":
+    start_rev = session.latest_revnum()
+  start_rev = int(start_rev)
+  end_rev = int(end_rev)
+
+  # Rewrite each log message, shifting old 'rNNN' references via
+  # repl_newrev, and store the result back into svn:log.
+  # NOTE(review): force=True presumably overrides the usual revprop-change
+  # restriction -- confirm against the csvn API docs.
+  for entry in session.log(start_rev, end_rev):
+    new_log = re.sub(r'(r\d+)', repl_newrev, entry.message)
+    session.revprop_set(propname='svn:log',
+                        propval=new_log,
+                        revnum=entry.revision,
+                        force=True)
+
+if __name__ == "__main__":
+ main()
diff --git a/tools/dev/merge-graph.py b/tools/dev/merge-graph.py
new file mode 100755
index 0000000..f587de8
--- /dev/null
+++ b/tools/dev/merge-graph.py
@@ -0,0 +1,58 @@
+#!/usr/bin/env python
+
+# ====================================================================
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements. See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership. The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied. See the License for the
+# specific language governing permissions and limitations
+# under the License.
+# ====================================================================
+
+args_message = '[-f png|svg|gif|dia... [-f ...]] GRAPH_CONFIG_FILE...'
+help_message = """Produce pretty graphs representing branches and merging.
+For each config file specified, construct a graph and write it as a PNG file
+(or other graphical file formats)."""
+
+import sys
+import getopt
+from mergegraph import MergeDot
+
+
+# If run as a program, process each input filename as a graph config file.
+if __name__ == '__main__':
+ optlist, args = getopt.getopt(sys.argv[1:], 'f:', ['format'])
+
+ prog_name = sys.argv[0]
+ if not args:
+ usage = '%s: usage: "%s %s"\n' % (prog_name, prog_name, args_message)
+ sys.stderr.write(usage)
+ sys.exit(1)
+
+ formats = []
+
+ for opt, opt_arg in optlist:
+ if opt == '-f':
+ formats.append(opt_arg)
+
+ if not formats:
+ formats.append('png')
+
+ for config_filename in args:
+ sys.stdout.write("%s: reading '%s', " % (prog_name, config_filename))
+ graph = MergeDot(config_filename, rankdir='LR', dpi='72')
+ for format in formats:
+ filename = '%s.%s' % (graph.basename, format)
+ sys.stdout.write("writing '%s' " % filename)
+ graph.save(format=format, filename=filename)
+ print
diff --git a/tools/dev/mergegraph/__init__.py b/tools/dev/mergegraph/__init__.py
new file mode 100644
index 0000000..6dfb7d9
--- /dev/null
+++ b/tools/dev/mergegraph/__init__.py
@@ -0,0 +1,20 @@
+# ====================================================================
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements. See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership. The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied. See the License for the
+# specific language governing permissions and limitations
+# under the License.
+# ====================================================================
+
+from mergegraph import MergeDot, MergeGraph, MergeSubgraph
diff --git a/tools/dev/mergegraph/mergegraph.py b/tools/dev/mergegraph/mergegraph.py
new file mode 100644
index 0000000..c2e5523
--- /dev/null
+++ b/tools/dev/mergegraph/mergegraph.py
@@ -0,0 +1,313 @@
+#!/usr/bin/env python
+
+# ====================================================================
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements. See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership. The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied. See the License for the
+# specific language governing permissions and limitations
+# under the License.
+# ====================================================================
+
+# Config file format:
+example = """
+ [graph]
+ filename = merge-sync-1.png
+ title = Sync Merge: CC vs SVN
+ # Branches: (branch name, branched from node, first rev, last rev).
+ branches = [
+ ('A', 'O0', 1, 4),
+ ('O', None, 0, 0),
+ ('B', 'O0', 1, 5)
+ ]
+ # Changes: nodes in which a change was committed; merge targets need not
+ # be listed here.
+ changes = [
+ 'A1', 'A2', 'A3', 'A4',
+ 'B1', 'B2', 'B3', 'B4', 'B5'
+ ]
+ # Merges: (base node, source-right node, target node, label).
+ # Base is also known as source-left.
+ merges = [
+ ('O0', 'A:1', 'B3', 'sync'),
+ ('A2', 'A:3', 'B5', 'sync'),
+ ]
+ # Annotations for nodes: (node, annotation text).
+ annotations = [
+ ('A2', 'cc:YCA')
+ ]
+"""
+
+# Notes about different kinds of merge.
+#
+# A basic 3-way merge is ...
+#
+# The ClearCase style of merge is a 3-way merge.
+#
+# The Subversion style of merge (that is, one phase of a Subversion merge)
+# is a three-way merge with its base (typically the YCA) on the source branch.
+
+
+import sys
+import pydot
+from pydot import Node, Edge
+
+
+def mergeinfo_to_node_list(mi):
+ """Convert a mergeinfo string such as '/foo:1,3-5*' into a list of
+ node names such as ['foo1', 'foo3', 'foo4', 'foo5'].
+ """
+ ### Doesn't yet strip the leading slash.
+ l = []
+ if mi:
+ for mi_str in mi.split(' '):
+ path, ranges = mi_str.split(':')
+ for r in ranges.split(','):
+ if r.endswith('*'):
+ # TODO: store & use this 'non-inheritable' flag
+ # Remove the flag
+ r = r[:-1]
+ rlist = r.split('-')
+ r1 = int(rlist[0])
+ if len(rlist) == 2:
+ r2 = int(rlist[1])
+ else:
+ r2 = r1
+ for rev in range(r1, r2 + 1):
+ l.append(path + str(rev))
+ return l
+
+
+class MergeGraph(pydot.Graph):
+  """Base class, not intended for direct use.  Use MergeDot for the main
+  graph and MergeSubgraph for a subgraph.
+
+  NOTE: methods in this class use 'graph' as the receiver name where
+  'self' is conventional.
+  """
+
+  def mk_origin_node(graph, name, label):
+    """Add a plain-text branch-origin node to the graph"""
+    graph.add_node(Node(name, label=label, shape='plaintext'))
+
+  def mk_invis_node(graph, name):
+    """Add an invisible placeholder node to the graph"""
+    graph.add_node(Node(name, style='invis'))
+
+  def mk_node(graph, name, label=None):
+    """Add a node to the graph, if not already present.  Nodes listed in
+    graph.changes get a visible label; others are drawn grey and unlabeled."""
+    if not graph.get_node(name):
+      if not label:
+        label = name
+      if name in graph.changes:
+        graph.add_node(Node(name, label=label))
+      else:
+        graph.add_node(Node(name, color='grey', label=''))
+
+  def mk_merge_target(graph, target_node, important):
+    """Add a merge target node to the graph."""
+    # 'important' merges (the last one) are highlighted in red.
+    if important:
+      color = 'red'
+    else:
+      color = 'black'
+    graph.add_node(Node(target_node, color=color, fontcolor=color, style='bold'))
+
+  def mk_edge(graph, name1, name2, **attrs):
+    """Add an ordinary (line-of-descent) edge to the graph"""
+    graph.add_edge(Edge(name1, name2, dir='none', style='dotted', color='grey', **attrs))
+
+  def mk_br_edge(graph, name1, name2):
+    """Add a branch-creation edge to the graph"""
+    # Constraint=false to avoid the Y-shape skewing the nice parallel branch lines
+    graph.mk_edge(name1, name2, constraint='false')
+
+  def mk_merge_edge(graph, src_node, tgt_node, kind, label, important):
+    """Add a merge edge (source-right to target) to the graph"""
+    if important:
+      color = 'red'
+    else:
+      color = 'grey'
+    e = Edge(src_node, tgt_node, constraint='false',
+             label='"' + label + '"',
+             color=color, fontcolor=color,
+             style='bold')
+    # Cherry-pick style merges are drawn dashed.
+    if kind.startswith('cherry'):
+      e.set_style('dashed')
+    graph.add_edge(e)
+
+  def mk_mergeinfo_edge(graph, base_node, src_node, important):
+    """Add an edge spanning the merged range, from the merge base
+    (odot tail) to the source-right node (tee head)."""
+    if important:
+      color = 'red'
+    else:
+      color = 'grey'
+    graph.add_edge(Edge(base_node, src_node,
+                        dir='both', arrowtail='odot', arrowhead='tee',
+                        color=color, constraint='false'))
+
+  def mk_invis_edge(graph, name1, name2):
+    """Add an invisible edge to the graph"""
+    graph.add_edge(Edge(name1, name2, style='invis'))
+
+  def add_merge(graph, merge, important):
+    """Add a merge: its mergeinfo edge, target node and merge edge.
+    MERGE is a (base_node, src_node, tgt_node, kind) tuple."""
+    base_node, src_node, tgt_node, kind = merge
+
+    if base_node and src_node: # and not kind.startwith('cherry'):
+      graph.mk_mergeinfo_edge(base_node, src_node, important)
+
+    # Merge target node
+    graph.mk_merge_target(tgt_node, important)
+
+    # Merge edge
+    graph.mk_merge_edge(src_node, tgt_node, kind, kind, important)
+
+  def add_annotation(graph, node, label, color='lightblue'):
+    """Add a graph node that serves as an annotation to a normal node.
+    More than one annotation can be added to the same normal node.
+    """
+    subg_name = node + '_annotations'
+
+    def get_subgraph(graph, name):
+      """Equivalent to pydot.Graph.get_subgraph() when there is no more than
+      one subgraph of the given name, but working around a bug in
+      pydot.Graph.get_subgraph().
+      """
+      for subg in graph.get_subgraph_list():
+        if subg.get_name() == name:
+          return subg
+      return None
+
+    g = get_subgraph(graph, subg_name)
+    if not g:
+      g = pydot.Subgraph(subg_name, rank='same')
+      graph.add_subgraph(g)
+
+    # Give each annotation of NODE a unique name by appending underscores
+    # until the name is unused within the subgraph.
+    ann_node = node + '_'
+    while g.get_node(ann_node):
+      ann_node = ann_node + '_'
+    g.add_node(Node(ann_node, shape='box', style='filled', color=color,
+                    label='"' + label + '"'))
+    g.add_edge(Edge(ann_node, node, style='solid', color=color,
+                    dir='none', constraint='false'))
+
+class MergeSubgraph(MergeGraph, pydot.Subgraph):
+  """A subgraph of a merge graph, used to group one branch's nodes."""
+  def __init__(graph, **attrs):
+    """Initialize both base classes; ATTRS are passed to pydot.Subgraph."""
+    MergeGraph.__init__(graph)
+    pydot.Subgraph.__init__(graph, **attrs)
+
+class MergeDot(MergeGraph, pydot.Dot):
+  """The top-level merge graph, built from a config file or from
+  explicit keyword arguments.
+
+  # TODO: In the 'merges' input, find the predecessor automatically.
+  """
+  def __init__(graph, config_filename=None,
+               filename=None, title=None, branches=None, changes=None,
+               merges=[], annotations=[], **attrs):
+    """Return a new MergeDot graph generated from a config file or args."""
+    # NOTE(review): the mutable default arguments ([]) are shared between
+    # calls; harmless only as long as they are never mutated in place.
+    MergeGraph.__init__(graph)
+    pydot.Dot.__init__(graph, **attrs)
+
+    if config_filename:
+      graph.read_config(config_filename)
+    else:
+      graph.filename = filename
+      graph.title = title
+      graph.branches = branches
+      graph.changes = changes
+      graph.merges = merges
+      graph.annotations = annotations
+
+    graph.construct()
+
+  def read_config(graph, config_filename):
+    """Initialize a MergeDot graph's input data from a config file."""
+    import ConfigParser
+    # Derive the default output basename from the config filename.
+    if config_filename.endswith('.txt'):
+      default_basename = config_filename[:-4]
+    else:
+      default_basename = config_filename
+
+    config = ConfigParser.SafeConfigParser({ 'basename': default_basename,
+                                             'title': None,
+                                             'merges': '[]',
+                                             'annotations': '[]' })
+    files_read = config.read(config_filename)
+    if len(files_read) == 0:
+      sys.stderr.write('graph: unable to read graph config from "' + config_filename + '"\n')
+      sys.exit(1)
+    graph.basename = config.get('graph', 'basename')
+    graph.title = config.get('graph', 'title')
+    # NOTE(review): eval() executes arbitrary Python expressions from the
+    # config file -- only run this tool on trusted config files.
+    graph.branches = eval(config.get('graph', 'branches'))
+    graph.changes = eval(config.get('graph', 'changes'))
+    graph.merges = eval(config.get('graph', 'merges'))
+    graph.annotations = eval(config.get('graph', 'annotations'))
+
+  def construct(graph):
+    """Build the graph's nodes and edges from its input data
+    (branches, changes, merges, annotations)."""
+    # Origin nodes (done first, in an attempt to set the order)
+    for br, orig, r1, head in graph.branches:
+      name = br + '0'
+      if r1 > 0:
+        graph.mk_origin_node(name, br)
+      else:
+        graph.mk_node(name, label=br)
+
+    # Edges and target nodes for merges
+    for merge in graph.merges:
+      # Emphasize the last merge, as it's the important one
+      important = (merge == graph.merges[-1])
+      graph.add_merge(merge, important)
+
+    # Parallel edges for basic lines of descent
+    for br, orig, r1, head in graph.branches:
+      sub_g = MergeSubgraph(ordering='out')
+      for i in range(1, head + 1):
+        prev_n = br + str(i - 1)
+        this_n = br + str(i)
+
+        # Normal edges and nodes (invisible before the branch's first rev)
+        if i < r1:
+          graph.mk_invis_node(this_n)
+        else:
+          graph.mk_node(this_n)
+        if i <= r1:
+          graph.mk_invis_edge(prev_n, this_n)
+        else:
+          graph.mk_edge(prev_n, this_n)
+
+      # Branch creation edges
+      if orig:
+        sub_g.mk_br_edge(orig, br + str(r1))
+
+      graph.add_subgraph(sub_g)
+
+    # Annotations
+    for node, label in graph.annotations:
+      graph.add_annotation(node, label)
+
+    # A title for the graph (added last so it goes at the top)
+    if graph.title:
+      graph.add_node(Node('title', shape='plaintext', label='"' + graph.title + '"'))
+
+  def save(graph, format='png', filename=None):
+    """Save this merge graph to the given file format. If filename is None,
+    construct a filename from the basename of the original file (as passed
+    to the constructor and then stored in graph.basename) and the suffix
+    according to the given format.
+    """
+    if not filename:
+      filename = graph.basename + '.' + format
+    # 'sh' is a pseudo-format: emit a shell script that replays the
+    # branching/merging; everything else is delegated to pydot.
+    if format == 'sh':
+      import save_as_sh
+      save_as_sh.write_sh_file(graph, filename)
+    else:
+      pydot.Dot.write(graph, filename, format=format)
diff --git a/tools/dev/mergegraph/save_as_sh.py b/tools/dev/mergegraph/save_as_sh.py
new file mode 100644
index 0000000..c5124ca
--- /dev/null
+++ b/tools/dev/mergegraph/save_as_sh.py
@@ -0,0 +1,137 @@
+#!/usr/bin/env python
+
+# ====================================================================
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements. See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership. The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied. See the License for the
+# specific language governing permissions and limitations
+# under the License.
+# ====================================================================
+
+
+# This module writes a sequence of 'svn' commands to a file, that when
+# run will perform the branching and merging described by a given MergeDot
+# graph description object.
+
+
+def shebang_line(out):
+  """Write a '#!/bin/sh' interpreter line to the file-like object OUT."""
+  out.write('#!/bin/sh\n')
+
+def command(out, cmd, *args):
+ """Write the shell command CMD with the arguments ARGS to the file-like
+ object OUT.
+ """
+ out.write(' '.join((cmd,) + args) + "\n")
+
+def svn(out, subcmd, *args):
+  """Write an svn command with the given subcommand and arguments.  Write
+  to the file-like object OUT.
+  """
+  command(out, 'svn', subcmd, *args)
+
+def comment(out, text):
+ """Write the comment TEXT to the file-like object OUT.
+ """
+ out.write('# %s\n' % text)
+
+def node_branch(node_name):
+  """Extract branch name from a node name.
+  E.g. 'A3' -> 'A'.
+  ### TODO: multi-char names.
+  """
+  return node_name[:1]
+
+def node_url(node_name):
+ """Extract the URL (in command-line repo-relative URL syntax) from a
+ node name.
+ """
+ return '^/' + node_branch(node_name)
+
+def node_rev(node_name):
+ """Extract revnum (as an integer) from a node name.
+ ### TODO: multi-char names.
+ """
+ return int(node_name[1:]) + 1
+
+def add(revs, node_name, action, *args):
+ """Add the tuple (ACTION, (ARGS)) to the list REVS[REVNUM].
+ """
+ revnum = node_rev(node_name)
+ if not revnum in revs:
+ revs[revnum] = []
+ revs[revnum].append((action, args))
+
+def write_recipe(graph, out):
+  """Write out a sequence of svn commands that will execute the branching
+  and merging shown in GRAPH.  Write to the file-like object OUT.
+  """
+  revs = {} # lists of (action, args) tuples, keyed by revnum
+
+  # Branch creations: a copy from the origin node, or a new project dir.
+  for br, orig, r1, head in graph.branches:
+    if orig:
+      add(revs, br + str(r1), 'copy', orig, br)
+    else:
+      add(revs, br + str(r1), 'mkproj', br)
+
+  for base_node, src_node, tgt_node, kind in graph.merges:
+    add(revs, tgt_node, 'merge', src_node, tgt_node, kind)
+
+  for node_name in graph.changes:
+    # Originally the 'changes' list could have entries that overlapped with
+    # merges. We must either disallow that or filter out such changes here.
+    #if not node_name in revs:
+    add(revs, node_name, 'modify', node_name)
+
+  # Execute the actions for each revision in turn.
+  for r in sorted(revs.keys()):
+    comment(out, 'start r' + str(r))
+    for action, params in revs[r]:
+      #comment(out, '(' + action + ' ' + params + ')')
+      if action == 'mkproj':
+        (br,) = params
+        svn(out, 'mkdir', br, br + '/created_in_' + br)
+      elif action == 'copy':
+        (orig, br) = params
+        svn(out, 'copy', '-r' + str(node_rev(orig)), node_branch(orig), br)
+      elif action == 'modify':
+        # A 'change' is simulated by creating a uniquely-named directory.
+        (node_name,) = params
+        svn(out, 'mkdir', node_branch(node_name) + '/new_in_' + node_name)
+      elif action == 'merge':
+        (src_node, tgt_node, kind) = params
+        assert node_rev(tgt_node) == r
+        svn(out, 'update')
+        if kind == 'cherry':
+          svn(out, 'merge',
+              '-c' + str(node_rev(src_node)), node_url(src_node),
+              node_branch(tgt_node))
+        elif kind.startswith('reint'):
+          svn(out, 'merge', '--reintegrate',
+              node_url(src_node) + '@' + str(node_rev(src_node)),
+              node_branch(tgt_node))
+        else:
+          svn(out, 'merge',
+              node_url(src_node) + '@' + str(node_rev(src_node)),
+              node_branch(tgt_node))
+        else:
+          raise Exception('unknown action: %s' % action)
+    # One commit per revision, after all of its actions.
+    svn(out, 'commit', '-m', 'r' + str(r))
+
+def write_sh_file(graph, filename):
+ """Write a file containing a sequence of 'svn' commands that when run will
+ perform the branching and merging described by the MergeDot object
+ GRAPH. Write to a new file named FILENAME.
+ """
+ out_stream = open(filename, 'w')
+ shebang_line(out_stream)
+ write_recipe(graph, out_stream)
+ out_stream.close()
diff --git a/tools/dev/min-includes.sh b/tools/dev/min-includes.sh
new file mode 100755
index 0000000..53dfb84
--- /dev/null
+++ b/tools/dev/min-includes.sh
@@ -0,0 +1,80 @@
+#!/bin/sh
+#
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements. See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership. The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied. See the License for the
+# specific language governing permissions and limitations
+# under the License.
+#
+#
+#
+# Attempt to figure out the minimum set of includes for our header files.
+#
+# ### this is incomplete. it merely lists the header files in order from
+# ### "no dependencies on other svn headers" to the larger header files
+# ### which have dependencies. manually working through the headers in
+# ### this order will minimize includes.
+#
+# Each header file is test-compiled to ensure that it has enough headers.
+# Of course, this could be false-positive because another header that
+# has been included has further included something to enable compilation
+# of the header in question. More sophisticated testing (e.g. filtering
+# includes out of the included header) would be necessary for detection.
+#
+
+files="*.h private/*.h"
+deps="deps.$$"
+
+INCLUDES="-I. -I.. -I/usr/include/apr-1 -I/usr/include/apache2"
+
+# Build the raw dependency list: one "<header> <included-svn-header>" pair
+# per line, ignoring svn_private_config.h.
+rm -f "$deps"
+for f in $files ; do
+  sed -n "s%#include \"\(svn_[a-z0-9_]*\.h\)\".*%$f \1%p" $f | fgrep -v svn_private_config.h >> "$deps"
+done
+
+
+# Test-compile $header on its own to verify it includes what it needs.
+# NOTE: 'function name ()' is a bashism; the plain POSIX 'name ()' form
+# works in both bash and the /bin/sh declared in the shebang.
+process_file ()
+{
+  echo "Processing $header"
+
+  echo "#include \"$header\"" > "$deps".c
+  gcc -o /dev/null -S $INCLUDES "$deps".c
+
+  ### monkey the includes and recompile to find the minimal set
+}
+
+# Repeatedly pick a header that no longer appears as an includer in the
+# dependency list (i.e. all of its svn includes have been handled),
+# process it, then remove it from the list and the work set.
+while test -s "$deps" ; do
+#wc -l $deps
+
+  for header in $files ; do
+
+    if grep -q "^$header" "$deps" ; then
+      continue
+    fi
+
+    process_file
+
+    fgrep -v "$header" "$deps" > "$deps".new
+    mv "$deps".new "$deps"
+
+    files="`echo $files | sed s%$header%%`"
+    break
+  done
+
+done
+
+# Any remaining headers have interdependencies; process them last.
+for header in $files ; do
+  process_file
+done
diff --git a/tools/dev/mklog.py b/tools/dev/mklog.py
new file mode 100755
index 0000000..a60712e
--- /dev/null
+++ b/tools/dev/mklog.py
@@ -0,0 +1,49 @@
+#!/usr/bin/env python
+#
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements. See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership. The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied. See the License for the
+# specific language governing permissions and limitations
+# under the License.
+#
+#
+#
+# Read a diff from stdin, and output a log message template to stdout.
+# Hint: It helps if the diff was generated using 'svn diff -x -p'
+#
+# Note: Don't completely trust the generated log message. This script
+# depends on the correct output of 'diff -x -p', which can sometimes get
+# confused.
+
+import sys, re
+
+rm = re.compile('@@.*@@ (.*)\(.*$')
+
+def main():
+ for line in sys.stdin:
+ if line[0:6] == 'Index:':
+ print('\n* %s' % line[7:-1])
+ prev_funcname = ''
+ continue
+ match = rm.search(line[:-1])
+ if match:
+ if prev_funcname == match.group(1):
+ continue
+ print(' (%s):' % match.group(1))
+ prev_funcname = match.group(1)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/tools/dev/mlpatch.py b/tools/dev/mlpatch.py
new file mode 100755
index 0000000..d74d820
--- /dev/null
+++ b/tools/dev/mlpatch.py
@@ -0,0 +1,167 @@
+#!/usr/bin/env python
+#
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements. See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership. The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied. See the License for the
+# specific language governing permissions and limitations
+# under the License.
+#
+#
+
+# mlpatch.py: Run with no arguments for usage
+
+import sys, os
+import sgmllib
+try:
+ # Python >=3.0
+ from html.entities import entitydefs
+ from urllib.request import urlopen as urllib_request_urlopen
+except ImportError:
+ # Python <3.0
+ from htmlentitydefs import entitydefs
+ from urllib2 import urlopen as urllib_request_urlopen
+import fileinput
+
+CHUNKSIZE = 8 * 1024
+
+class MyParser(sgmllib.SGMLParser):
+ def __init__(self):
+ self.baseclass = sgmllib.SGMLParser
+ self.baseclass.__init__(self)
+ self.entitydefs = entitydefs
+ self.entitydefs["nbsp"] = " "
+ self.inbody = False
+ self.complete_line = False
+ self.discard_gathered()
+
+ def discard_gathered(self):
+ self.gather_data = False
+ self.gathered_data = ""
+
+ def noop(self):
+ pass
+
+ def out(self, data):
+ sys.stdout.write(data)
+
+ def handle_starttag(self, tag, method, attrs):
+ if not self.inbody: return
+ self.baseclass.handle_starttag(self, tag, method, attrs)
+
+ def handle_endtag(self, tag, method):
+ if not self.inbody: return
+ self.baseclass.handle_endtag(self, tag, method)
+
+ def handle_data(self, data):
+ if not self.inbody: return
+ data = data.replace('\n','')
+ if len(data) == 0: return
+ if self.gather_data:
+ self.gathered_data += data
+ else:
+ if self.complete_line:
+ if data[0] in ('+', '-', ' ', '#') \
+ or data.startswith("Index:") \
+ or data.startswith("@@ ") \
+ or data.startswith("======"):
+ # Real new line
+ self.out('\n')
+ else:
+ # Presume that we are wrapped
+ self.out(' ')
+ self.complete_line = False
+ self.out(data)
+
+ def handle_charref(self, ref):
+ if not self.inbody: return
+ self.baseclass.handle_charref(self, ref)
+
+ def handle_entityref(self, ref):
+ if not self.inbody: return
+ self.baseclass.handle_entityref(self, ref)
+
+ def handle_comment(self, comment):
+ if comment == ' body="start" ':
+ self.inbody = True
+ elif comment == ' body="end" ':
+ self.inbody = False
+
+ def handle_decl(self, data):
+ if not self.inbody: return
+ print("DECL: " + data)
+
+ def unknown_starttag(self, tag, attrs):
+ if not self.inbody: return
+ print("UNKTAG: %s %s" % (tag, attrs))
+
+ def unknown_endtag(self, tag):
+ if not self.inbody: return
+ print("UNKTAG: /%s" % (tag))
+
+ def do_br(self, attrs):
+ self.complete_line = True
+
+ def do_p(self, attrs):
+ if self.complete_line:
+ self.out('\n')
+ self.out(' ')
+ self.complete_line = True
+
+ def start_a(self, attrs):
+ self.gather_data = True
+
+ def end_a(self):
+ self.out(self.gathered_data.replace('_at_', '@'))
+ self.discard_gathered()
+
+ def close(self):
+ if self.complete_line:
+ self.out('\n')
+ self.baseclass.close(self)
+
+
+def main():
+ if len(sys.argv) == 1:
+ sys.stderr.write(
+ "usage: mlpatch.py dev|users year month msgno > foobar.patch\n" +
+ "example: mlpatch.py dev 2005 01 0001 > issue-XXXX.patch\n" +
+ """
+ Very annoyingly, the http://svn.haxx.se/ subversion mailing list archives
+ mangle inline patches, and provide no raw message download facility
+ (other than for an entire month's email as an mbox).
+
+ So, I wrote this script, to demangle them. It's not perfect, as it has to
+ guess about whitespace, but it does an acceptable job.\n""")
+ sys.exit(0)
+ elif len(sys.argv) != 5:
+ sys.stderr.write("error: mlpatch.py: Bad parameters - run with no "
+ + "parameters for usage\n")
+ sys.exit(1)
+ else:
+ list, year, month, msgno = sys.argv[1:]
+ url = "http://svn.haxx.se/" \
+ + "%(list)s/archive-%(year)s-%(month)s/%(msgno)s.shtml" % locals()
+ print("MsgUrl: " + url)
+ msgfile = urllib_request_urlopen(url)
+ p = MyParser()
+ buffer = msgfile.read(CHUNKSIZE)
+ while buffer:
+ p.feed(buffer)
+ buffer = msgfile.read(CHUNKSIZE)
+ p.close()
+ msgfile.close()
+
+if __name__ == '__main__':
+ main()
diff --git a/tools/dev/normalize-dump.py b/tools/dev/normalize-dump.py
new file mode 100755
index 0000000..10cde4e
--- /dev/null
+++ b/tools/dev/normalize-dump.py
@@ -0,0 +1,137 @@
+#!/usr/bin/env python
+#
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements. See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership. The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied. See the License for the
+# specific language governing permissions and limitations
+# under the License.
+#
+#
+
+import sys
+import re
+
+header_re = re.compile(r'^([^:]*): ?(.*)$')
+
+class NodePath:
+ def __init__(self, path, headers):
+ self.path = path
+ self.headers = headers
+
+ def dump(self):
+ print((' ' * 3) + self.path)
+ headers = sorted(self.headers.keys())
+ for header in headers:
+ print((' ' * 6) + header + ': ' + self.headers[header])
+
+
+def dump_revision(rev, nodepaths):
+ sys.stderr.write('* Normalizing revision ' + rev + '...')
+ print('Revision ' + rev)
+ paths = sorted(nodepaths.keys())
+ for path in paths:
+ nodepath = nodepaths[path]
+ nodepath.dump()
+ sys.stderr.write('done\n')
+
+
+
+def parse_header_block(fp):
+ headers = {}
+ while True:
+ line = fp.readline()
+ if line == '':
+ return headers, 1
+ line = line.strip()
+ if line == '':
+ return headers, 0
+ matches = header_re.match(line)
+ if not matches:
+ raise Exception('Malformed header block')
+ headers[matches.group(1)] = matches.group(2)
+
+
+def parse_file(fp):
+ nodepaths = {}
+ current_rev = None
+
+ while True:
+ # Parse a block of headers
+ headers, eof = parse_header_block(fp)
+
+ # This is a revision header block
+ if 'Revision-number' in headers:
+
+ # If there was a previous revision, dump it
+ if current_rev:
+ dump_revision(current_rev, nodepaths)
+
+ # Reset the data for this revision
+ current_rev = headers['Revision-number']
+ nodepaths = {}
+
+ # Skip the contents
+ prop_len = headers.get('Prop-content-length', 0)
+ fp.read(int(prop_len))
+
+ # This is a node header block
+ elif 'Node-path' in headers:
+
+ # Make a new NodePath object, and add it to the
+ # dictionary thereof
+ path = headers['Node-path']
+ node = NodePath(path, headers)
+ nodepaths[path] = node
+
+ # Skip the content
+ text_len = headers.get('Text-content-length', 0)
+ prop_len = headers.get('Prop-content-length', 0)
+ fp.read(int(text_len) + int(prop_len))
+
+ # Not a revision, not a node -- if we've already seen at least
+ # one revision block, we are in an errorful state.
+ elif current_rev and len(headers.keys()):
+ raise Exception('Header block from outta nowhere')
+
+ if eof:
+ if current_rev:
+ dump_revision(current_rev, nodepaths)
+ break
+
+def usage():
+ print('Usage: ' + sys.argv[0] + ' [DUMPFILE]')
+ print('')
+ print('Reads a Subversion dumpfile from DUMPFILE (or, if not provided,')
+ print('from stdin) and normalizes the metadata contained therein,')
+ print('printing summarized and sorted information. This is useful for')
+ print('generating data about dumpfiles in a diffable fashion.')
+ sys.exit(0)
+
+def main():
+ if len(sys.argv) > 1:
+ if sys.argv[1] == '--help':
+ usage()
+ fp = open(sys.argv[1], 'rb')
+ else:
+ fp = sys.stdin
+ parse_file(fp)
+
+
+if __name__ == '__main__':
+ main()
+
+
+
+
diff --git a/tools/dev/po-merge.py b/tools/dev/po-merge.py
new file mode 100755
index 0000000..e63a739
--- /dev/null
+++ b/tools/dev/po-merge.py
@@ -0,0 +1,197 @@
+#!/usr/bin/env python
+#
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements. See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership. The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied. See the License for the
+# specific language governing permissions and limitations
+# under the License.
+#
+#
+
+import os, re, sys
+
+msgstr_re = re.compile('msgstr\[\d+\] "')
+
+def parse_translation(f):
+ """Read a single translation entry from the file F and return a
+ tuple with the comments, msgid, msgid_plural and msgstr. The comments is
+ returned as a list of lines which do not end in new-lines. The msgid is
+ string. The msgid_plural is string or None. The msgstr is a list of
+ strings. The msgid, msgid_plural and msgstr strings can contain embedded
+ newlines"""
+ line = f.readline()
+
+ # Parse comments
+ comments = []
+ while True:
+ if line.strip() == '' or line[:2] == '#~':
+ return comments, None, None, None
+ elif line[0] == '#':
+ comments.append(line[:-1])
+ else:
+ break
+ line = f.readline()
+
+ # Parse msgid
+ if line[:7] != 'msgid "' or line[-2] != '"':
+ raise RuntimeError("parse error")
+ msgid = line[6:-1]
+ while True:
+ line = f.readline()
+ if line[0] != '"':
+ break
+ msgid = msgid[:-1] + line[1:-1]
+
+ # Parse optional msgid_plural
+ msgid_plural = None
+ if line[:14] == 'msgid_plural "':
+ if line[-2] != '"':
+ raise RuntimeError("parse error")
+ msgid_plural = line[13:-1]
+ while True:
+ line = f.readline()
+ if line[0] != '"':
+ break
+ msgid_plural = msgid_plural[:-1] + line[1:-1]
+
+ # Parse msgstr
+ msgstr = []
+ if not msgid_plural:
+ if line[:8] != 'msgstr "' or line[-2] != '"':
+ raise RuntimeError("parse error")
+ msgstr.append(line[7:-1])
+ while True:
+ line = f.readline()
+ if len(line) == 0 or line[0] != '"':
+ break
+ msgstr[0] += '\n' + line[:-1]
+ else:
+ if line[:7] != 'msgstr[' or line[-2] != '"':
+ raise RuntimeError("parse error")
+ i = 0
+ while True:
+ matched_msgstr = msgstr_re.match(line)
+ if matched_msgstr:
+ matched_msgstr_len = len(matched_msgstr.group(0))
+ msgstr.append(line[matched_msgstr_len-1:-1])
+ else:
+ break
+ while True:
+ line = f.readline()
+ if len(line) == 0 or line[0] != '"':
+ break
+ msgstr[i] += '\n' + line[:-1]
+ i += 1
+
+ if line.strip() != '':
+ raise RuntimeError("parse error")
+
+ return comments, msgid, msgid_plural, msgstr
+
+def split_comments(comments):
+ """Split COMMENTS into flag comments and other comments. Flag
+ comments are those that begin with '#,', e.g. '#,fuzzy'."""
+ flags = []
+ other = []
+ for c in comments:
+ if len(c) > 1 and c[1] == ',':
+ flags.append(c)
+ else:
+ other.append(c)
+ return flags, other
+
+def main(argv):
+ if len(argv) != 2:
+ argv0 = os.path.basename(argv[0])
+ sys.exit('Usage: %s <lang.po>\n'
+ '\n'
+ 'This script will replace the translations and flags in lang.po (LF line endings)\n'
+ 'with the translations and flags in the source po file read from standard input.\n'
+ 'Strings that are not found in the source file are left untouched.\n'
+ 'A backup copy of lang.po is saved as lang.po.bak.\n'
+ '\n'
+ 'Example:\n'
+ ' svn cat http://svn.apache.org/repos/asf/subversion/trunk/subversion/po/sv.po | \\\n'
+ ' %s sv.po' % (argv0, argv0))
+
+ # Read the source po file into a hash
+ source = {}
+ while True:
+ comments, msgid, msgid_plural, msgstr = parse_translation(sys.stdin)
+ if not comments and msgid is None:
+ break
+ if msgid is not None:
+ source[msgid] = msgstr, split_comments(comments)[0]
+
+ # Make a backup of the output file, open the copy for reading
+ # and the original for writing.
+ os.rename(argv[1], argv[1] + '.bak')
+ infile = open(argv[1] + '.bak')
+ outfile = open(argv[1], 'w')
+
+  # Loop through the original and replace stuff as we go
+ first = 1
+ string_count = 0
+ update_count = 0
+ untranslated = 0
+ fuzzy = 0
+ while True:
+ comments, msgid, msgid_plural, msgstr = parse_translation(infile)
+ if not comments and msgid is None:
+ break
+ if not first:
+ outfile.write('\n')
+ first = 0
+ if msgid is None:
+ outfile.write('\n'.join(comments) + '\n')
+ else:
+ string_count += 1
+ # Do not update the header, and only update if the source
+ # has a non-empty translation.
+ if msgid != '""' and source.get(msgid, ['""', []])[0] != '""':
+ other = split_comments(comments)[1]
+ new_msgstr, new_flags = source[msgid]
+ new_comments = other + new_flags
+ if new_msgstr != msgstr or new_comments != comments:
+ update_count += 1
+ msgstr = new_msgstr
+ comments = new_comments
+ outfile.write('\n'.join(comments) + '\n')
+ outfile.write('msgid ' + msgid + '\n')
+ if not msgid_plural:
+ outfile.write('msgstr ' + msgstr[0] + '\n')
+ else:
+ outfile.write('msgid_plural ' + msgid_plural + '\n')
+ n = 0
+ for i in msgstr:
+ outfile.write('msgstr[%s] %s\n' % (n, msgstr[n]))
+ n += 1
+ if msgstr is not None:
+ for m in msgstr:
+ if m == '""':
+ untranslated += 1
+ for c in comments:
+ if c.startswith('#,') and 'fuzzy' in c.split(', '):
+ fuzzy += 1
+
+ # We're done. Tell the user what we did.
+ print(('%d strings updated. '
+ '%d fuzzy strings. '
+ '%d of %d strings are still untranslated (%.0f%%).' %
+ (update_count, fuzzy, untranslated, string_count,
+ 100.0 * untranslated / string_count)))
+
+if __name__ == '__main__':
+ main(sys.argv)
diff --git a/tools/dev/prebuild-cleanup.sh b/tools/dev/prebuild-cleanup.sh
new file mode 100755
index 0000000..2ef6b80
--- /dev/null
+++ b/tools/dev/prebuild-cleanup.sh
@@ -0,0 +1,45 @@
+#!/bin/sh
+#
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements. See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership. The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied. See the License for the
+# specific language governing permissions and limitations
+# under the License.
+#
+#
+
+### Purify a system, to simulate building Subversion on a "clean" box.
+###
+### You'll probably need to run this as `root', and may need to change
+### some paths for your system.
+
+# Clean out old apr, apr-util config scripts.
+rm /usr/local/bin/apr-config
+rm /usr/local/bin/apu-config
+
+# Clean out libs.
+rm -f /usr/local/lib/APRVARS
+rm -f /usr/local/lib/libapr*
+rm -f /usr/local/lib/libexpat*
+rm -f /usr/local/lib/libneon*
+rm -f /usr/local/lib/libsvn*
+
+# Clean out headers.
+rm -f /usr/local/include/apr*
+rm -f /usr/local/include/svn*
+rm -f /usr/local/include/neon/*
+
+### Not sure this would be useful:
+# rm -f /usr/local/apache2/lib/*
diff --git a/tools/dev/random-commits.py b/tools/dev/random-commits.py
new file mode 100755
index 0000000..a243990
--- /dev/null
+++ b/tools/dev/random-commits.py
@@ -0,0 +1,50 @@
+#!/usr/bin/env python
+#
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements. See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership. The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied. See the License for the
+# specific language governing permissions and limitations
+# under the License.
+#
+#
+#
+# USAGE: random-commits.py
+#
+# Using the FILELIST (see config below), a series of COUNT commits will be
+# constructed, each changing up to MAXFILES files per commit. The commands
+# will be sent to stdout (formatted as a shell script).
+#
+# The FILELIST can be constructed using the find-textfiles script.
+#
+
+import random
+
+FILELIST = 'textfiles'
+COUNT = 1000 # this many commits
+MAXFILES = 10 # up to 10 files at a time
+
+files = open(FILELIST).readlines()
+
+print('#!/bin/sh')
+
+for i in range(COUNT):
+ n = random.randrange(1, MAXFILES+1)
+ l = [ ]
+ print("echo '--- begin commit #%d -----------------------------------'" % (i+1,))
+ for j in range(n):
+ fname = random.choice(files)[:-1] # strip trailing newline
+ print("echo 'part of change #%d' >> %s" % (i+1, fname))
+ l.append(fname)
+ print("svn commit -m 'commit #%d' %s" % (i+1, ' '.join(l)))
diff --git a/tools/dev/remove-trailing-whitespace.sh b/tools/dev/remove-trailing-whitespace.sh
new file mode 100755
index 0000000..1dbde0c
--- /dev/null
+++ b/tools/dev/remove-trailing-whitespace.sh
@@ -0,0 +1,24 @@
+#!/bin/sh
+
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements. See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership. The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied. See the License for the
+# specific language governing permissions and limitations
+# under the License.
+
+ for ext in c h cpp java py pl rb hpp cmd bat sql sh; do
+ find . -name "*.$ext" -not -type l -exec \
+ perl -pi -e 's/[ \t]*$//' {} + ;
+ # don't use \s to not strip ^L pagebreaks
+ done
diff --git a/tools/dev/sbox-ospath.py b/tools/dev/sbox-ospath.py
new file mode 100755
index 0000000..9e38755
--- /dev/null
+++ b/tools/dev/sbox-ospath.py
@@ -0,0 +1,64 @@
+#!/usr/bin/env python
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements. See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership. The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied. See the License for the
+# specific language governing permissions and limitations
+# under the License.
+#
+#
+# USAGE:
+# $ ./sbox-ospath.py FILENAME
+#
+# This script will look for all lines in the file that use an expression
+# that looks like:
+# os.path.join(wc_dir, 'A', 'B')
+#
+# and rewrite that to:
+# sbox.ospath('A/B')
+#
+# Obviously, this relies heavily on standard naming for the variables in
+# our testing code. Visual inspection (and execution!) should be performed.
+#
+# The file is rewritten in place.
+#
+
+import sys
+import os
+import re
+
+RE_FIND_JOIN = re.compile(r'os\.path\.join\((?:sbox\.)?wc_dir, '
+ r'(["\'][^"\']*["\'](?:, ["\'][^"\']*["\'])*)\)')
+
+
+def rewrite_file(fname):
+ count = 0
+ lines = open(fname).readlines()
+ for i in range(len(lines)):
+ line = lines[i]
+ match = RE_FIND_JOIN.search(line)
+ if match:
+ start, end = match.span()
+ parts = match.group(1).replace('"', "'").replace("', '", '/')
+ lines[i] = line[:start] + 'sbox.ospath(' + parts + ')' + line[end:]
+ count += 1
+ if count == 0:
+ print('No changes.')
+ else:
+ open(fname, 'w').writelines(lines)
+ print('%s rewrites performed.' % (count,))
+
+
+if __name__ == '__main__':
+ rewrite_file(sys.argv[1])
diff --git a/tools/dev/scramble-tree.py b/tools/dev/scramble-tree.py
new file mode 100755
index 0000000..7857aad
--- /dev/null
+++ b/tools/dev/scramble-tree.py
@@ -0,0 +1,304 @@
+#!/usr/bin/env python
+#
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements. See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership. The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied. See the License for the
+# specific language governing permissions and limitations
+# under the License.
+#
+#
+#
+# scramble-tree.py: (See scramble-tree.py --help.)
+#
+# Makes multiple random file changes to a directory tree, for testing.
+#
+# This script will add some new files, remove some existing files, add
+# text to some existing files, and delete text from some existing
+# files. It will also leave some files completely untouched.
+#
+# The exact set of changes made is always the same for identical trees,
+# where "identical" means the names of files and directories are the
+# same, and they are arranged in the same tree structure (the actual
+# contents of files may differ). If two are not identical, the sets of
+# changes scramble-tree.py will make may differ arbitrarily.
+#
+# Directories named .svn/ and CVS/ are ignored.
+#
+# Example scenario, starting with a pristine Subversion working copy:
+#
+# $ ls
+# foo/
+# $ svn st foo
+# $ cp -r foo bar
+# $ svn st bar
+# $ scramble-tree.py foo
+# $ svn st foo
+# [... see lots of scary status output ...]
+# $ scramble-tree.py bar
+# [... see the exact same scary status output ...]
+# $ scramble-tree.py foo
+# [... see a new bunch of scary status output ...]
+# $
+
+import os
+import sys
+import getopt
+try:
+ my_getopt = getopt.gnu_getopt
+except AttributeError:
+ my_getopt = getopt.getopt
+import random
+from hashlib import md5 as hashlib_md5
+import base64
+
+
+class VCActions:
+ def __init__(self):
+ pass
+ def add_file(self, path):
+ """Add an existing file to version control."""
+ pass
+ def remove_file(self, path):
+ """Remove an existing file from version control, and delete it."""
+ pass
+
+
+class NoVCActions(VCActions):
+ def remove_file(self, path):
+ os.unlink(path)
+
+
+class CVSActions(VCActions):
+ def add_file(self, path):
+ cwd = os.getcwd()
+ try:
+ dirname, basename = os.path.split(path)
+ os.chdir(os.path.join(cwd, dirname))
+ os.system('cvs -Q add -m "Adding file to repository" "%s"' % (basename))
+ finally:
+ os.chdir(cwd)
+ def remove_file(self, path):
+ cwd = os.getcwd()
+ try:
+ dirname, basename = os.path.split(path)
+ os.chdir(os.path.join(cwd, dirname))
+ os.system('cvs -Q rm -f "%s"' % (basename))
+ finally:
+ os.chdir(cwd)
+
+
+class SVNActions(VCActions):
+ def add_file(self, path):
+ os.system('svn add --quiet "%s"' % (path))
+ def remove_file(self, path):
+ os.remove(path)
+ os.system('svn rm --quiet --force "%s"' % (path))
+
+
+class hashDir:
+ """Given a directory, creates a string containing all directories
+ and files under that directory (sorted alphanumerically) and makes a
+ base64-encoded md5 hash of the resulting string. Call
+ hashDir.gen_seed() to generate a seed value for this tree."""
+
+ def __init__(self, rootdir):
+ self.allfiles = []
+ for dirpath, dirs, files in os.walk(rootdir):
+ self.walker_callback(len(rootdir), dirpath, dirs + files)
+
+ def gen_seed(self):
+ # Return a base64-encoded (kinda ... strip the '==\n' from the
+ # end) MD5 hash of sorted tree listing.
+ self.allfiles.sort()
+ return base64.encodestring(hashlib_md5(''.join(self.allfiles)).digest())[:-3]
+
+ def walker_callback(self, baselen, dirname, fnames):
+ if ((dirname == '.svn') or (dirname == 'CVS')):
+ return
+ self.allfiles.append(dirname[baselen:])
+ for filename in fnames:
+ path = os.path.join(dirname, filename)
+ if not os.path.isdir(path):
+ self.allfiles.append(path[baselen:])
+
+
+class Scrambler:
+ def __init__(self, seed, vc_actions, dry_run, quiet):
+ if not quiet:
+ print('SEED: ' + seed)
+
+ self.rand = random.Random(seed)
+ self.vc_actions = vc_actions
+ self.dry_run = dry_run
+ self.quiet = quiet
+ self.ops = [] ### ["add" | "munge", path]
+ self.greeking = """
+======================================================================
+This is some text that was inserted into this file by the lovely and
+talented scramble-tree.py script.
+======================================================================
+"""
+
+ ### Helpers
+ def shrink_list(self, list, remove_count):
+ if len(list) <= remove_count:
+ return []
+ for i in range(remove_count):
+ j = self.rand.randrange(len(list) - 1)
+ del list[j]
+ return list
+
+ def _make_new_file(self, dir):
+ i = 0
+ path = None
+ for i in range(99999):
+ path = os.path.join(dir, "newfile.%05d.txt" % i)
+ if not os.path.exists(path):
+ open(path, 'w').write(self.greeking)
+ return path
+ raise Exception("Ran out of unique new filenames in directory '%s'" % dir)
+
+ ### File Mungers
+ def _mod_append_to_file(self, path):
+ if not self.quiet:
+ print('append_to_file: %s' % path)
+ if self.dry_run:
+ return
+ fh = open(path, "a")
+ fh.write(self.greeking)
+ fh.close()
+
+ def _mod_remove_from_file(self, path):
+ if not self.quiet:
+ print('remove_from_file: %s' % path)
+ if self.dry_run:
+ return
+ lines = self.shrink_list(open(path, "r").readlines(), 5)
+ open(path, "w").writelines(lines)
+
+ def _mod_delete_file(self, path):
+ if not self.quiet:
+ print('delete_file: %s' % path)
+ if self.dry_run:
+ return
+ self.vc_actions.remove_file(path)
+
+ ### Public Interfaces
+ def get_randomizer(self):
+ return self.rand
+
+ def schedule_munge(self, path):
+ self.ops.append(tuple(["munge", path]))
+
+ def schedule_addition(self, dir):
+ self.ops.append(tuple(["add", dir]))
+
+ def enact(self, limit):
+ num_ops = len(self.ops)
+ if limit == 0:
+ return
+ elif limit > 0 and limit <= num_ops:
+ self.ops = self.shrink_list(self.ops, num_ops - limit)
+ for op, path in self.ops:
+ if op == "add":
+ path = self._make_new_file(path)
+ if not self.quiet:
+ print("add_file: %s" % path)
+ if self.dry_run:
+ return
+ self.vc_actions.add_file(path)
+ elif op == "munge":
+ file_mungers = [self._mod_append_to_file,
+ self._mod_append_to_file,
+ self._mod_append_to_file,
+ self._mod_remove_from_file,
+ self._mod_remove_from_file,
+ self._mod_remove_from_file,
+ self._mod_delete_file,
+ ]
+ self.rand.choice(file_mungers)(path)
+
+
+def usage(retcode=255):
+ print('Usage: %s [OPTIONS] DIRECTORY' % (sys.argv[0]))
+ print('')
+ print('Options:')
+ print(' --help, -h : Show this usage message.')
+ print(' --seed ARG : Use seed ARG to scramble the tree.')
+ print(' --use-svn : Use Subversion (as "svn") to perform file additions')
+ print(' and removals.')
+ print(' --use-cvs : Use CVS (as "cvs") to perform file additions')
+ print(' and removals.')
+ print(' --dry-run : Don\'t actually change the disk.')
+ print(' --limit N : Limit the scrambling to a maximum of N operations.')
+ print(' --quiet, -q : Run in stealth mode!')
+ sys.exit(retcode)
+
+
+def walker_callback(scrambler, dirname, fnames):
+ if ((dirname.find('.svn') != -1) or dirname.find('CVS') != -1):
+ return
+ rand = scrambler.get_randomizer()
+ if rand.randrange(5) == 1:
+ scrambler.schedule_addition(dirname)
+ for filename in fnames:
+ path = os.path.join(dirname, filename)
+ if not os.path.isdir(path) and rand.randrange(3) == 1:
+ scrambler.schedule_munge(path)
+
+
+def main():
+ seed = None
+ vc_actions = NoVCActions()
+ dry_run = 0
+ quiet = 0
+ limit = None
+
+ # Mm... option parsing.
+ optlist, args = my_getopt(sys.argv[1:], "hq",
+ ['seed=', 'use-svn', 'use-cvs',
+ 'help', 'quiet', 'dry-run', 'limit='])
+ for opt, arg in optlist:
+ if opt == '--help' or opt == '-h':
+ usage(0)
+ if opt == '--seed':
+ seed = arg
+ if opt == '--use-svn':
+ vc_actions = SVNActions()
+ if opt == '--use-cvs':
+ vc_actions = CVSActions()
+ if opt == '--dry-run':
+ dry_run = 1
+ if opt == '--limit':
+ limit = int(arg)
+ if opt == '--quiet' or opt == '-q':
+ quiet = 1
+
+ # We need at least a path to work with, here.
+ argc = len(args)
+ if argc < 1 or argc > 1:
+ usage()
+ rootdir = args[0]
+
+  # If a seed wasn't provided, calculate one.
+ if seed is None:
+ seed = hashDir(rootdir).gen_seed()
+ scrambler = Scrambler(seed, vc_actions, dry_run, quiet)
+ for dirpath, dirs, files in os.walk(rootdir):
+ walker_callback(scrambler, dirpath, dirs + files)
+ scrambler.enact(limit)
+
+if __name__ == '__main__':
+ main()
diff --git a/tools/dev/stress.pl b/tools/dev/stress.pl
new file mode 100755
index 0000000..5b76be3
--- /dev/null
+++ b/tools/dev/stress.pl
@@ -0,0 +1,498 @@
+#!/usr/bin/perl -w
+# ====================================================================
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements. See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership. The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied. See the License for the
+# specific language governing permissions and limitations
+# under the License.
+# ====================================================================
+
+# A script that allows some simple testing of Subversion, in
+# particular concurrent read, write and read-write access by the 'svn'
+# client. It can also create working copy trees containing a large
+# number of files and directories. All repository access is via the
+# 'svnadmin' and 'svn' commands.
+#
+# This script constructs a repository, and populates it with
+# files. Then it loops making changes to a subset of the files and
+# committing the tree. Thus when two, or more, instances are run in
+# parallel there is concurrent read and write access. Sometimes a
+# commit will fail due to a commit conflict. This is expected, and is
+# automatically resolved by updating the working copy.
+#
+# Each file starts off containing:
+# A0
+# 0
+# A1
+# 1
+# A2
+# .
+# .
+# A9
+# 9
+#
+# The script runs with an ID in the range 0-9, and when it modifies a
+# file it modifies the line that starts with its ID. Thus scripts with
+# different IDs will make changes that can be merged automatically.
+#
+# The main loop is then:
+#
+# step 1: modify a random selection of files
+#
+# step 2: optional sleep or wait for RETURN keypress
+#
+# step 3: update the working copy automatically merging out-of-date files
+#
+# step 4: try to commit, if not successful go to step 3 otherwise go to step 1
+#
+# To allow break-out of potentially infinite loops, the script will
+# terminate if it detects the presence of a "stop file", the path to
+# which is specified with the -S option (default ./stop). This allows
+# the script to be stopped without any danger of interrupting an 'svn'
+# command, which experiment shows may require Berkeley db_recover to
+# be used on the repository.
+#
+# Running the Script
+# ==================
+#
+# Use three xterms all with shells on the same directory. In the
+# first xterm run (note, this will remove anything called repostress
+# in the current directory)
+#
+# % stress.pl -c -s1
+#
+# When the message "Committed revision 1." scrolls past, use the second
+# xterm to run
+#
+# % stress.pl -s1
+#
+# Both xterms will modify, update and commit separate working copies to
+# the same repository.
+#
+# Use the third xterm to touch a file 'stop' to cause the scripts to
+# exit cleanly, i.e. without interrupting an svn command.
+#
+# To run a third, fourth, etc. instance of the script use -i
+#
+# % stress.pl -s1 -i2
+# % stress.pl -s1 -i3
+#
+# Running several instances at once will cause a *lot* of disk
+# activity. I have run ten instances simultaneously on a Linux tmpfs
+# (RAM based) filesystem -- watching ten xterms scroll irregularly
+# can be quite hypnotic!
+
+use strict;
+use IPC::Open3;
+use Getopt::Std;
+use File::Find;
+use File::Path;
+use File::Spec::Functions;
+use Cwd;
+
+# The name of this script, for error messages.
+my $stress = 'stress.pl';
+
+# When testing BDB 4.4 and later with DB_RECOVER enabled, the criteria
+# for a failed update and commit are a bit looser than otherwise.
+my $dbrecover = undef;
+
+# Repository check/create
+sub init_repo
+ {
+ my ( $repo, $create, $no_sync, $fsfs ) = @_;
+ if ( $create )
+ {
+ rmtree([$repo]) if -e $repo;
+ my $svnadmin_cmd = "svnadmin create $repo";
+ $svnadmin_cmd .= " --fs-type bdb" if not $fsfs;
+ $svnadmin_cmd .= " --bdb-txn-nosync" if $no_sync;
+ system( $svnadmin_cmd) and die "$stress: $svnadmin_cmd: failed: $?\n";
+ open ( CONF, ">>$repo/conf/svnserve.conf")
+ or die "$stress: open svnserve.conf: $!\n";
+ print CONF "[general]\nanon-access = write\n";
+ close CONF or die "$stress: close svnserve.conf: $!\n";
+ }
+ $repo = getcwd . "/$repo" if not file_name_is_absolute $repo;
+ $dbrecover = 1 if -e "$repo/db/__db.register";
+ print "$stress: BDB automatic database recovery enabled\n" if $dbrecover;
+ return $repo;
+ }
+
+# Check-out a working copy
+sub check_out
+ {
+ my ( $url, $options ) = @_;
+ my $wc_dir = "wcstress.$$";
+ mkdir "$wc_dir", 0755 or die "$stress: mkdir wcstress.$$: $!\n";
+ my $svn_cmd = "svn co $url $wc_dir $options";
+ system( $svn_cmd ) and die "$stress: $svn_cmd: failed: $?\n";
+ return $wc_dir;
+ }
+
+# Print status and update. The update is to do any required merges.
+sub status_update
+ {
+ my ( $options, $wc_dir, $wait_for_key, $disable_status,
+ $resolve_conflicts ) = @_;
+ my $svn_cmd = "svn st -u $options $wc_dir";
+ if ( not $disable_status ) {
+ print "Status:\n";
+ system( $svn_cmd ) and die "$stress: $svn_cmd: failed: $?\n";
+ }
+ print "Press return to update/commit\n" if $wait_for_key;
+ read STDIN, $wait_for_key, 1 if $wait_for_key;
+ print "Updating:\n";
+ $svn_cmd = "svn up --non-interactive $options $wc_dir";
+
+ # Check for conflicts during the update. If any exist, we resolve them.
+ my $pid = open3(\*UPDATE_WRITE, \*UPDATE_READ, \*UPDATE_ERR_READ,
+ $svn_cmd);
+ my @conflicts = ();
+ while ( <UPDATE_READ> )
+ {
+ print;
+ s/\r*$//; # [Windows compat] Remove trailing \r's
+ if ( /^C (.*)$/ )
+ {
+ push(@conflicts, ($1))
+ }
+ }
+
+ # Print any errors.
+ my $acceptable_error = 0;
+ while ( <UPDATE_ERR_READ> )
+ {
+ print;
+ if ($dbrecover)
+ {
+ s/\r*$//; # [Windows compat] Remove trailing \r's
+ $acceptable_error = 1 if ( /^svn:[ ]
+ (
+ bdb:[ ]PANIC
+ |
+ DB_RUNRECOVERY
+ )
+ /x );
+ }
+ }
+
+ # Close up the streams.
+ close UPDATE_ERR_READ or die "$stress: close UPDATE_ERR_READ: $!\n";
+ close UPDATE_WRITE or die "$stress: close UPDATE_WRITE: $!\n";
+ close UPDATE_READ or die "$stress: close UPDATE_READ: $!\n";
+
+ # Get commit subprocess exit status
+ die "$stress: waitpid: $!\n" if $pid != waitpid $pid, 0;
+ die "$stress: unexpected update fail: exit status: $?\n"
+ unless $? == 0 or ( $? == 256 and $acceptable_error );
+
+ if ($resolve_conflicts)
+ {
+ foreach my $conflict (@conflicts)
+ {
+ $svn_cmd = "svn resolved $conflict";
+ system( $svn_cmd ) and die "$stress: $svn_cmd: failed: $?\n";
+ }
+ }
+ }
+
+# Print status, update and commit. The update is to do any required
+# merges. Returns 0 if the commit succeeds and 1 if it fails due to a
+# conflict.
+sub status_update_commit
+ {
+ my ( $options, $wc_dir, $wait_for_key, $disable_status,
+ $resolve_conflicts ) = @_;
+    status_update $options, $wc_dir, $wait_for_key, $disable_status,
+ $resolve_conflicts;
+ print "Committing:\n";
+ # Use current time as log message
+ my $now_time = localtime;
+ # [Windows compat] Must use double quotes for the log message.
+ my $svn_cmd = "svn ci $options $wc_dir -m \"$now_time\"";
+
+ # Need to handle the commit carefully. It could fail for all sorts
+ # of reasons, but errors that indicate a conflict are "acceptable"
+ # while other errors are not. Thus there is a need to check the
+ # return value and parse the error text.
+ my $pid = open3(\*COMMIT_WRITE, \*COMMIT_READ, \*COMMIT_ERR_READ,
+ $svn_cmd);
+ print while ( <COMMIT_READ> );
+
+ # Look for acceptable errors, ones we expect to occur due to conflicts
+ my $acceptable_error = 0;
+ while ( <COMMIT_ERR_READ> )
+ {
+ print;
+ s/\r*$//; # [Windows compat] Remove trailing \r's
+ $acceptable_error = 1 if ( /^svn:[ ]
+ (
+ .*out[ ]of[ ]date
+ |
+ Conflict[ ]at
+ |
+ Baseline[ ]incorrect
+                                  | # NOTE(review): trailing empty alternative makes ANY "svn: " error acceptable -- confirm intended
+ )
+ /ix )
+ or ( $dbrecover and ( /^svn:[ ]
+ (
+ bdb:[ ]PANIC
+ |
+ DB_RUNRECOVERY
+ )
+ /x ));
+
+
+ }
+ close COMMIT_ERR_READ or die "$stress: close COMMIT_ERR_READ: $!\n";
+ close COMMIT_WRITE or die "$stress: close COMMIT_WRITE: $!\n";
+ close COMMIT_READ or die "$stress: close COMMIT_READ: $!\n";
+
+ # Get commit subprocess exit status
+ die "$stress: waitpid: $!\n" if $pid != waitpid $pid, 0;
+ die "$stress: unexpected commit fail: exit status: $?\n"
+ if ( $? != 0 and $? != 256 ) or ( $? == 256 and $acceptable_error != 1 );
+
+ return $? == 256 ? 1 : 0;
+ }
+
+# Get a list of all versioned files in the working copy
+{
+ my @get_list_of_files_helper_array;
+ sub GetListOfFilesHelper
+ {
+ $File::Find::prune = 1 if $File::Find::name =~ m[/.svn];
+ return if $File::Find::prune or -d;
+ push @get_list_of_files_helper_array, $File::Find::name;
+ }
+ sub GetListOfFiles
+ {
+ my ( $wc_dir ) = @_;
+ @get_list_of_files_helper_array = ();
+ find( \&GetListOfFilesHelper, $wc_dir);
+ return @get_list_of_files_helper_array;
+ }
+}
+
+# Populate a working copy
+sub populate
+ {
+ my ( $dir, $dir_width, $file_width, $depth, $pad, $props ) = @_;
+ return if not $depth--;
+
+ for my $nfile ( 1..$file_width )
+ {
+ my $filename = "$dir/foo$nfile";
+ open( FOO, ">$filename" ) or die "$stress: open $filename: $!\n";
+
+ for my $line ( 0..9 )
+ {
+ print FOO "A$line\n$line\n"
+ or die "$stress: write to $filename: $!\n";
+ map { print FOO $_ x 255, "\n"; } ("a", "b", "c", "d")
+ foreach (1..$pad);
+ }
+ print FOO "\$HeadURL: \$\n"
+ or die "$stress: write to $filename: $!\n" if $props;
+ close FOO or die "$stress: close $filename: $!\n";
+
+ my $svn_cmd = "svn add $filename";
+ system( $svn_cmd ) and die "$stress: $svn_cmd: failed: $?\n";
+
+ if ( $props )
+ {
+ $svn_cmd = "svn propset svn:eol-style native $filename";
+ system( $svn_cmd ) and die "$stress: $svn_cmd: failed: $?\n";
+
+ $svn_cmd = "svn propset svn:keywords HeadURL $filename";
+ system( $svn_cmd ) and die "$stress: $svn_cmd: failed: $?\n";
+ }
+ }
+
+ if ( $depth )
+ {
+ for my $ndir ( 1..$dir_width )
+ {
+ my $dirname = "$dir/bar$ndir";
+ my $svn_cmd = "svn mkdir $dirname";
+ system( $svn_cmd ) and die "$stress: $svn_cmd: failed: $?\n";
+
+ populate( "$dirname", $dir_width, $file_width, $depth, $pad,
+ $props );
+ }
+ }
+ }
+
+# Modify a versioned file in the working copy
+sub ModFile
+ {
+ my ( $filename, $mod_number, $id ) = @_;
+
+ # Read file into memory replacing the line that starts with our ID
+ open( FOO, "<$filename" ) or die "$stress: open $filename: $!\n";
+ my @lines = map { s[(^$id.*)][$1,$mod_number]; $_ } <FOO>;
+ close FOO or die "$stress: close $filename: $!\n";
+
+ # Write the memory back to the file
+ open( FOO, ">$filename" ) or die "$stress: open $filename: $!\n";
+ print FOO or die "$stress: print $filename: $!\n" foreach @lines;
+ close FOO or die "$stress: close $filename: $!\n";
+ }
+
+sub ParseCommandLine
+ {
+ my %cmd_opts;
+ my $usage = "
+usage: stress.pl [-cdfhprW] [-i num] [-n num] [-s secs] [-x num] [-o options]
+ [-D num] [-F num] [-N num] [-P num] [-R path] [-S path]
+ [-U url]
+
+where
+ -c cause repository creation
+ -d don't make the status calls
+ -f use --fs-type fsfs during repository creation
+ -h show this help information (other options will be ignored)
+ -i the ID (valid IDs are 0 to 9, default is 0 if -c given, 1 otherwise)
+ -n the number of sets of changes to commit
+ -p add svn:eol-style and svn:keywords properties to the files
+ -r perform update-time conflict resolution
+ -s the sleep delay (-1 wait for key, 0 none)
+ -x the number of files to modify in each commit
+ -o options to pass for subversion client
+ -D the number of sub-directories per directory in the tree
+ -F the number of files per directory in the tree
+ -N the depth of the tree
+ -P the number of 10K blocks with which to pad the file
+ -R the path to the repository
+ -S the path to the file whose presence stops this script
+ -U the URL to the repository (file:///<-R path> by default)
+ -W use --bdb-txn-nosync during repository creation
+";
+
+ # defaults
+ $cmd_opts{'D'} = 2; # number of subdirs per dir
+ $cmd_opts{'F'} = 2; # number of files per dir
+ $cmd_opts{'N'} = 2; # depth
+ $cmd_opts{'P'} = 0; # padding blocks
+ $cmd_opts{'R'} = "repostress"; # repository name
+ $cmd_opts{'S'} = "stop"; # path of file to stop the script
+ $cmd_opts{'U'} = "none"; # URL
+ $cmd_opts{'W'} = 0; # create with --bdb-txn-nosync
+ $cmd_opts{'c'} = 0; # create repository
+ $cmd_opts{'d'} = 0; # disable status
+ $cmd_opts{'f'} = 0; # create with --fs-type fsfs
+ $cmd_opts{'h'} = 0; # help
+ $cmd_opts{'i'} = 0; # ID
+ $cmd_opts{'n'} = 200; # sets of changes
+ $cmd_opts{'p'} = 0; # add file properties
+ $cmd_opts{'r'} = 0; # conflict resolution
+ $cmd_opts{'s'} = -1; # sleep interval
+ $cmd_opts{'x'} = 4; # files to modify
+ $cmd_opts{'o'} = ""; # no options passed
+
+ getopts( 'cdfhi:n:prs:x:o:D:F:N:P:R:S:U:W', \%cmd_opts ) or die $usage;
+
+ # print help info (and exit nicely) if requested
+ if ( $cmd_opts{'h'} )
+ {
+ print( $usage );
+ exit 0;
+ }
+
+ # default ID if not set
+ $cmd_opts{'i'} = 1 - $cmd_opts{'c'} if not $cmd_opts{'i'};
+ die $usage if $cmd_opts{'i'} !~ /^[0-9]$/;
+
+ return %cmd_opts;
+ }
+
+############################################################################
+# Main
+
+# Why the fixed seed? I use this script for more than stress testing,
+# I also use it to create test repositories. When creating a test
+# repository, while I don't care exactly which files get modified, I
+# find it useful for the repositories to be reproducible, i.e. to have
+# the same files modified each time. When using this script for
+# stress testing one could remove this fixed seed and Perl will
+# automatically use a pseudo-random seed. However it doesn't much
+# matter, the stress testing really depends on the real-time timing
+# differences between multiple instances of the script, rather than the
+# randomness of the chosen files.
+srand 123456789;
+
+my %cmd_opts = ParseCommandLine();
+
+my $repo = init_repo( $cmd_opts{'R'}, $cmd_opts{'c'}, $cmd_opts{'W'},
+ $cmd_opts{'f'} );
+
+# [Windows compat]
+# Replace backslashes in the path, and tweak the number of slashes
+# in the scheme separator to make the URL always correct.
+my $urlsep = ($repo =~ m/^\// ? '//' : '///');
+$repo =~ s/\\/\//g;
+
+# Make URL from path if URL not explicitly specified
+$cmd_opts{'U'} = "file:$urlsep$repo" if $cmd_opts{'U'} eq "none";
+
+my $wc_dir = check_out $cmd_opts{'U'}, $cmd_opts{'o'};
+
+if ( $cmd_opts{'c'} )
+ {
+ my $svn_cmd = "svn mkdir $wc_dir/trunk";
+ system( $svn_cmd ) and die "$stress: $svn_cmd: failed: $?\n";
+ populate( "$wc_dir/trunk", $cmd_opts{'D'}, $cmd_opts{'F'}, $cmd_opts{'N'},
+ $cmd_opts{'P'}, $cmd_opts{'p'} );
+ status_update_commit $cmd_opts{'o'}, $wc_dir, 0, 1
+ and die "$stress: populate checkin failed\n";
+ }
+
+my @wc_files = GetListOfFiles $wc_dir;
+die "$stress: not enough files in repository\n"
+ if $#wc_files + 1 < $cmd_opts{'x'};
+
+my $wait_for_key = $cmd_opts{'s'} < 0;
+
+my $stop_file = $cmd_opts{'S'};
+
+for my $mod_number ( 1..$cmd_opts{'n'} )
+ {
+ my @chosen;
+ for ( 1..$cmd_opts{'x'} )
+ {
+ # Extract random file from list and modify it
+      my $mod_file = splice @wc_files, int rand scalar @wc_files, 1;
+ ModFile $mod_file, $mod_number, $cmd_opts{'i'};
+ push @chosen, $mod_file;
+ }
+ # Reinstate list of files, the order doesn't matter
+ push @wc_files, @chosen;
+
+ if ( $cmd_opts{'x'} > 0 ) {
+ # Loop committing until successful or the stop file is created
+ 1 while not -e $stop_file
+      and status_update_commit $cmd_opts{'o'}, $wc_dir, $wait_for_key,
+ $cmd_opts{'d'}, $cmd_opts{'r'};
+ } else {
+    status_update $cmd_opts{'o'}, $wc_dir, $wait_for_key, $cmd_opts{'d'},
+ $cmd_opts{'r'};
+ }
+
+ # Break out of loop, or sleep, if required
+ print( "stop file '$stop_file' detected\n" ), last if -e $stop_file;
+ sleep $cmd_opts{'s'} if $cmd_opts{'s'} > 0;
+ }
+
diff --git a/tools/dev/svn-dev.el b/tools/dev/svn-dev.el
new file mode 100644
index 0000000..2fc32c3
--- /dev/null
+++ b/tools/dev/svn-dev.el
@@ -0,0 +1,566 @@
+;;;; Emacs Lisp help for writing Subversion code. ;;;;
+
+;; Licensed to the Apache Software Foundation (ASF) under one
+;; or more contributor license agreements. See the NOTICE file
+;; distributed with this work for additional information
+;; regarding copyright ownership. The ASF licenses this file
+;; to you under the Apache License, Version 2.0 (the
+;; "License"); you may not use this file except in compliance
+;; with the License. You may obtain a copy of the License at
+;;
+;; http://www.apache.org/licenses/LICENSE-2.0
+;;
+;; Unless required by applicable law or agreed to in writing,
+;; software distributed under the License is distributed on an
+;; "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+;; KIND, either express or implied. See the License for the
+;; specific language governing permissions and limitations
+;; under the License.
+
+
+;; Later on, there will be auto-detection of svn files, modeline
+;; status, and a whole library of routines to interface with the
+;; command-line client. For now, there's this, at Ben's request.
+;;
+;; All this stuff should get folded into Emacs VC mode, really.
+
+(defun svn-revert ()
+ "Revert the current buffer and its file to its svn base revision."
+ (interactive)
+ (let ((obuf (current-buffer))
+ (fname (buffer-file-name))
+ (outbuf (get-buffer-create "*svn output*")))
+ (set-buffer outbuf)
+ (delete-region (point-min) (point-max))
+ (call-process "svn" nil outbuf nil "status" fname)
+ (goto-char (point-min))
+ (search-forward fname)
+ (beginning-of-line)
+ (if (looking-at "^?")
+ (error "\"%s\" is not a Subversion-controlled file" fname))
+ (call-process "svn" nil outbuf nil "revert" fname)
+ (set-buffer obuf)
+ ;; todo: make a backup~ file?
+ (save-excursion
+ (revert-buffer nil t)
+ (save-buffer))
+ (message "Reverted \"%s\"." fname)))
+
+(defun svn-resolved ()
+ "Tell Subversion that conflicts in the current buffer and its file have
+been resolved."
+ (interactive)
+ (let ((obuf (current-buffer))
+ (fname (buffer-file-name))
+ (outbuf (get-buffer-create "*svn output*")))
+ (set-buffer outbuf)
+ (delete-region (point-min) (point-max))
+ (call-process "svn" nil outbuf nil "status" fname)
+ (goto-char (point-min))
+ (search-forward fname)
+ (beginning-of-line)
+ (if (looking-at "^?")
+ (error "\"%s\" is not a Subversion-controlled file" fname))
+ (call-process "svn" nil outbuf nil "resolved" fname)
+ (set-buffer obuf)
+ ;; todo: make a backup~ file?
+ (save-excursion
+ (revert-buffer nil t)
+ (save-buffer))
+ (message "Marked \"%s\" as conflict-free." fname)))
+
+(defconst svn-adm-area ".svn"
+ "The name of the Subversion administrative subdirectory.")
+
+(defconst svn-adm-entries ".svn/entries"
+ "The path from cwd to the Subversion entries file.")
+
+(defun svn-controlled-path-p (path)
+ "Return non-nil if PATH is under Subversion version control, else
+return nil. If PATH does not exist, return nil.
+
+In the future, this will return an Emacs Lisp reflection of PATH's
+entry, either an explicit svn-entry-struct, or a list of the form
+\(LAST-COMMIT-REV CURRENT-REV LAST-COMMITTER ...\), so we can display
+svn information in the mode line. But that requires truly parsing the
+entries file, instead of just detecting PATH among the entries."
+ (interactive "f") ; any use for interactive, other than testing?
+ (cond
+ ((not (file-exists-p path))
+ nil)
+ ((file-directory-p path)
+ (let ((adm-area (concat path "/" svn-adm-area)))
+ (if (file-directory-p adm-area)
+ t
+ nil)))
+ (t
+ (let ((entries (concat (file-name-directory path) svn-adm-entries))
+ (basename (file-name-nondirectory path))
+ (found nil))
+ (save-excursion
+ (if (file-directory-p (concat (file-name-directory path) svn-adm-area))
+ (progn
+ (let ((find-file-hooks nil))
+ (set-buffer (find-file-noselect entries t)))
+ (goto-char (point-min))
+ (if (search-forward (format "name=\"%s\"" basename) nil t)
+ (setq found t)
+ (setq found nil))
+ (kill-buffer nil)))
+ found)))))
+
+
+(defun svn-text-base-path (file)
+ "Return the path to the text base for FILE (a string).
+If FILE is a directory or not under version control, return nil."
+ (cond
+ ((not (svn-controlled-path-p file)) nil)
+ ((file-directory-p file) nil)
+ (t
+ (let* ((pdir (file-name-directory file))
+ (base (file-name-nondirectory file)))
+ (format "%s%s/text-base/%s.svn-base" (or pdir "") svn-adm-area base)))))
+
+
+(defun svn-ediff (file)
+ "Ediff FILE against its text base."
+ (interactive "fsvn ediff: ")
+ (let ((tb (svn-text-base-path file)))
+ (if (not tb)
+ (error "No text base for %s" file)
+ (ediff-files file tb))))
+
+
+(defun svn-find-file-hook ()
+ "Function for find-file-hooks.
+Inhibit backup files unless `vc-make-backup-files' is non-nil."
+ (if (svn-controlled-path-p (buffer-file-name))
+ (progn
+ (if (string-match "XEMACS\\|XEmacs\\|xemacs" emacs-version)
+ (vc-load-vc-hooks)) ; for `vc-make-backup-files'
+ (unless vc-make-backup-files
+ (make-local-variable 'backup-inhibited)
+ (setq backup-inhibited t)))))
+
+(add-hook 'find-file-hooks 'svn-find-file-hook)
+
+
+
+;;; Dynamic generation of common Subversion URLs.
+;;;
+;;; (I have a version of this that actually fetches the stuff from the
+;;; Net if you don't have a local copy, but it requires a very recent
+;;; version of Emacs, so I didn't bother with it here. -kfogel)
+
+(defvar svn-site-source-tree-top (expand-file-name "~/projects/svn/site/")
+ "*Top directory of your Subversion site source tree of
+repository \"http://svn.apache.org/repos/asf/subversion/site\".
+You almost certainly want to set this in your .emacs, to override
+the default; use `(setq svn-site-source-tree-top
+\"/path/to/the/site/tree\")'.")
+
+(defvar svn-faq-file (concat svn-site-source-tree-top "/publish/faq.html")
+ "*A local copy of the Subversion FAQ.")
+
+(defvar svn-hacking-file (concat svn-site-source-tree-top
+ "/docs/community-guide/community-guide.html")
+ "*A local copy of the Subversion hacking.html file.")
+
+;; Helper for referring to issue numbers in a user-friendly way.
+(defun svn-bug-url (n)
+ "Insert the url for Subversion issue number N. Interactively, prompt for N."
+ (interactive "nSubversion issue number: ")
+ (insert (format "http://subversion.tigris.org/issues/show_bug.cgi?id=%d" n)))
+
+;; Helper for referring to revisions in a browser-friendly way.
+(defun svn-rev-url (rev &optional transform)
+ "Insert the url for Subversion revision REV, or if TRANSFORM is not
+nil, then transform the revision at or around point into an HTML link.
+
+Interactively, if at or inside a revision number, transform it into
+full HTML link; otherwise, prompt for revision number and insert just
+the resulting URL."
+ (interactive (let ((thing (thing-at-point 'word)))
+ (if (and thing (string-match "r[0-9]+" thing))
+ (list thing t)
+ (list (read-string "Subversion revision number: ") nil))))
+ (if (string-match "^r[0-9]+" rev)
+ (setq rev (substring rev 1)))
+ (if transform
+ (let* ((bounds (bounds-of-thing-at-point 'word))
+ (start (car bounds))
+ (end (cdr bounds)))
+ (delete-region start end)))
+ (insert (format "http://svn.apache.org/viewcvs?view=revision&revision=%s"
+ rev)))
+
+(defconst svn-url-base "http://subversion.apache.org/")
+(defconst svn-faq-url (concat svn-url-base "faq.html"))
+(defconst svn-hacking-url (concat svn-url-base
+ "docs/community-guide/community-guide.html"))
+
+(defun svn-html-get-targets (file)
+ "Build a list of targets for the Subversion web file FILE."
+ (let* ((lst nil)
+ (already-buffer (find-buffer-visiting file))
+ (faq-buffer (or already-buffer (find-file-noselect file))))
+ (save-excursion
+ (set-buffer faq-buffer)
+ (goto-char (point-min))
+ ;; TODO: Ideally, this wouldn't depend on the presence of a
+ ;; table of contents with "#" URLs, it would read the divs and
+ ;; anchors themselves.
+ (while (search-forward "href=\"#" nil t)
+ (let ((b (point))
+ (e (progn (search-forward "\"") (forward-char -1) (point))))
+ (setq lst (cons (buffer-substring b e) lst))))
+ (if (not already-buffer)
+ (kill-buffer (current-buffer)))
+ lst)))
+
+(defun svn-url-completing-read (file prompt &optional hist-list)
+ "Completingly read an HTML target for FILE, prompting with PROMPT.
+If HIST-LIST is non-nil, it is a symbol: the completion history list to use."
+ (progn
+ (let* ((targets (svn-html-get-targets file))
+ (target-str (completing-read prompt targets nil t nil hist-list)))
+ (list target-str))))
+
+(defvar svn-faq-history-list nil
+ "History list for the 'svn-faq-url' prompt.")
+
+(defvar svn-hacking-history-list nil
+ "History list for the 'svn-hacking-url' prompt.")
+
+(defun svn-faq-url (target)
+ "Prompt with completion for a targeted SVN FAQ item, then insert it.
+If called non-interactively, TARGET is the target within the faq (an
+HTML anchor name, that is, the part after the \"#\")."
+ (interactive
+ (svn-url-completing-read svn-faq-file "FAQ entry: "
+ 'svn-faq-history-list))
+ (insert svn-faq-url "#" target))
+
+(defun svn-hacking-url (target)
+ "Prompt with completion for a targeted hacking.html item, then insert it.
+If called non-interactively, TARGET is the target within hacking.html
+(an HTML anchor name, that is, the part after the \"#\")."
+ (interactive
+ (svn-url-completing-read svn-hacking-file "hacking.html entry: "
+ 'svn-hacking-history-list))
+ (insert svn-hacking-url "#" target))
+
+
+
+;;; Subversion C conventions
+(if (eq major-mode 'c-mode)
+ (progn
+ (c-add-style "svn" '("gnu" (c-offsets-alist . ((inextern-lang . 0)))))
+ (c-set-style "svn")))
+(setq indent-tabs-mode nil)
+(setq angry-mob-with-torches-and-pitchforks t)
+
+
+
+;; Subversion Python conventions, plus some harmless helpers for
+;; people who don't have python mode set up by default.
+(autoload 'python-mode "python-mode" nil t)
+(or (assoc "\\.py$" auto-mode-alist)
+ (setq auto-mode-alist
+ (cons '("\\.py$" . python-mode) auto-mode-alist)))
+
+(defun svn-python-mode-hook ()
+ "Set up the Subversion python conventions. The effect of this is
+local to the current buffer, which is presumably visiting a file in
+the Subversion project. Python setup in other buffers will not be
+affected."
+ (when (string-match "/subversion/" (buffer-file-name))
+ (make-local-variable 'py-indent-offset)
+ (setq indent-tabs-mode nil)
+ (setq py-indent-offset 2)
+ (make-local-variable 'py-smart-indentation)
+ (setq py-smart-indentation nil)))
+
+(add-hook 'python-mode-hook 'svn-python-mode-hook)
+
+
+
+;; Much of the APR documentation is embedded perldoc format. The
+;; perldoc program itself sucks, however. If you're the author of
+;; perldoc, I'm sorry, but what were you thinking? Don't you know
+;; that there are people in the world who don't work in vt100
+;; terminals? If I want to view a perldoc page in my Emacs shell
+;; buffer, I have to run the ridiculous command
+;;
+;; $ PAGER=cat perldoc -t target_file
+;;
+;; (Not that this was documented anywhere, I had to figure it out for
+;; myself by reading /usr/bin/perldoc).
+;;
+;; Non-paging behavior should be a standard command-line option. No
+;; program that can output text should *ever* insist on invoking the
+;; pager.
+;;
+;; Anyway, these Emacs commands will solve the problem for us.
+;;
+;; Acknowledgements:
+;; Much of this code is copied from man.el in the FSF Emacs 21.x
+;; sources.
+
+(defcustom svn-perldoc-overstrike-face 'bold
+ "*Face to use when fontifying overstrike."
+ :type 'face
+ :group 'svn-dev)
+
+(defcustom svn-perldoc-underline-face 'underline
+ "*Face to use when fontifying underlining."
+ :type 'face
+ :group 'svn-dev)
+
+
+(defun svn-perldoc-softhyphen-to-minus ()
+ ;; \255 is some kind of dash in Latin-N. Versions of Debian man, at
+ ;; least, emit it even when not in a Latin-N locale.
+ (unless (eq t (compare-strings "latin-" 0 nil
+ current-language-environment 0 6 t))
+ (goto-char (point-min))
+ (let ((str "\255"))
+ (if enable-multibyte-characters
+ (setq str (string-as-multibyte str)))
+ (while (search-forward str nil t) (replace-match "-")))))
+
+
+(defun svn-perldoc-fontify-buffer ()
+ "Convert overstriking and underlining to the correct fonts.
+Same for the ANSI bold and normal escape sequences."
+ (interactive)
+ (message "Please wait, making up the page...")
+ (goto-char (point-min))
+ (while (search-forward "\e[1m" nil t)
+ (delete-backward-char 4)
+ (put-text-property (point)
+ (progn (if (search-forward "\e[0m" nil 'move)
+ (delete-backward-char 4))
+ (point))
+ 'face svn-perldoc-overstrike-face))
+ (goto-char (point-min))
+ (while (search-forward "_\b" nil t)
+ (backward-delete-char 2)
+ (put-text-property (point) (1+ (point)) 'face svn-perldoc-underline-face))
+ (goto-char (point-min))
+ (while (search-forward "\b_" nil t)
+ (backward-delete-char 2)
+ (put-text-property (1- (point)) (point) 'face svn-perldoc-underline-face))
+ (goto-char (point-min))
+ (while (re-search-forward "\\(.\\)\\(\b\\1\\)+" nil t)
+ (replace-match "\\1")
+ (put-text-property (1- (point)) (point) 'face svn-perldoc-overstrike-face))
+ (goto-char (point-min))
+ (while (re-search-forward "o\b\\+\\|\\+\bo" nil t)
+ (replace-match "o")
+ (put-text-property (1- (point)) (point) 'face 'bold))
+ (goto-char (point-min))
+ (while (re-search-forward "[-|]\\(\b[-|]\\)+" nil t)
+ (replace-match "+")
+ (put-text-property (1- (point)) (point) 'face 'bold))
+ (svn-perldoc-softhyphen-to-minus)
+ (message "Please wait, making up the page...done"))
+
+
+(defun svn-perldoc-cleanup-buffer ()
+ "Remove overstriking and underlining from the current buffer."
+ (interactive)
+ (message "Please wait, cleaning up the page...")
+ (progn
+ (goto-char (point-min))
+ (while (search-forward "_\b" nil t) (backward-delete-char 2))
+ (goto-char (point-min))
+ (while (search-forward "\b_" nil t) (backward-delete-char 2))
+ (goto-char (point-min))
+ (while (re-search-forward "\\(.\\)\\(\b\\1\\)+" nil t)
+ (replace-match "\\1"))
+ (goto-char (point-min))
+ (while (re-search-forward "\e\\[[0-9]+m" nil t) (replace-match ""))
+ (goto-char (point-min))
+ (while (re-search-forward "o\b\\+\\|\\+\bo" nil t) (replace-match "o"))
+ (goto-char (point-min))
+    (while (re-search-forward "\f" nil t) (replace-match " ")))
+ (goto-char (point-min))
+ (while (re-search-forward "[-|]\\(\b[-|]\\)+" nil t) (replace-match "+"))
+ (svn-perldoc-softhyphen-to-minus)
+ (message "Please wait, cleaning up the page...done"))
+
+
+;; Entry point to svn-perldoc functionality.
+(defun svn-perldoc (file)
+ "Run perldoc on FILE, display the output in a buffer."
+ (interactive "fRun perldoc on file: ")
+ (let ((outbuf (get-buffer-create
+ (format "*%s PerlDoc*" (file-name-nondirectory file))))
+ (savepg (getenv "PAGER")))
+ (setenv "PAGER" "cat") ;; for perldoc
+ (save-excursion
+ (set-buffer outbuf)
+ (delete-region (point-min) (point-max))
+ (call-process "perldoc" nil outbuf nil (expand-file-name file))
+ (svn-perldoc-fontify-buffer)
+ (svn-perldoc-cleanup-buffer)
+ ;; Clean out the inevitable leading dead space.
+ (goto-char (point-min))
+    (re-search-forward "[^ \t\n]")
+ (beginning-of-line)
+ (delete-region (point-min) (point)))
+ (setenv "PAGER" savepg)
+ (display-buffer outbuf)))
+
+
+
+;;; Help developers write log messages.
+
+;; How to use this: just run `svn-log-message'. You might want to
+;; bind it to a key, for example,
+;;
+;; (define-key "\C-cl" 'svn-log-message)
+;;
+;; The log message will accumulate in a file. Later, you can use
+;; that file when you commit:
+;;
+;; $ svn ci -F msg ...
+
+(defun svn-log-path-derive (path)
+  "Derive a relative directory path for absolute PATH, for a log entry.
+If PATH contains \"code/\", \"src/\" or \"projects/\", chop everything
+up to and including that component; otherwise strip the user's home
+directory prefix."
+  (save-match-data
+    (let ((base (file-name-nondirectory path)) ;; NOTE(review): unused binding
+          (chop-spot (string-match
+                      "\\(code/\\)\\|\\(src/\\)\\|\\(projects/\\)"
+                      path)))
+      (if chop-spot
+          (progn
+            (setq path (substring path (match-end 0)))
+            ;; Kluge for Subversion developers.
+            (if (string-match "subversion/" path)
+                (substring path (+ (match-beginning 0) 11))
+              path))
+        ;; Fallback: assumes PATH lives under the home directory; if the
+        ;; match fails, (match-end 0) will signal an error -- TODO confirm.
+        (string-match (expand-file-name "~/") path)
+        (substring path (match-end 0))))))
+
+
+(defun svn-log-message-file ()
+  "Return the name of the appropriate log message accumulation file.
+Usually this is just the file `msg' in the current directory, but
+certain areas are treated specially, for example, the Subversion
+source tree."
+  (save-match-data
+    ;; Inside a directory whose path mentions \"subversion\", keep one
+    ;; shared msg file at the top of that tree instead of a local one.
+    (if (string-match "subversion" default-directory)
+        (concat (substring default-directory 0 (match-end 0)) "/msg")
+      "msg")))
+
+
+(defun svn-log-message (short-file-names)
+  "Add to an in-progress log message, based on context around point.
+If prefix arg SHORT-FILE-NAMES is non-nil, then use basenames only in
+log messages, otherwise use full paths.  The current defun name is
+always used.
+
+If the log message already contains material about this defun, then put
+point there, so adding to that material is easy.
+
+Else if the log message already contains material about this file, put
+point there, and push onto the kill ring the defun name with log
+message dressing around it, plus the raw defun name, so yank and
+yank-next are both useful.
+
+Else if there is no material about this defun nor file anywhere in the
+log message, then put point at the end of the message and insert a new
+entry for file with defun.
+
+See also the function `svn-log-message-file'."
+  (interactive "P")
+  (let ((this-file (if short-file-names
+                       (file-name-nondirectory buffer-file-name)
+                     (svn-log-path-derive buffer-file-name)))
+        ;; Fall back to a hand-rolled C parse when add-log can't find a defun.
+        (this-defun (or (add-log-current-defun)
+                        (save-excursion
+                          (save-match-data
+                            (if (eq major-mode 'c-mode)
+                                (progn
+                                  (if (fboundp 'c-beginning-of-statement-1)
+                                      (c-beginning-of-statement-1)
+                                    (c-beginning-of-statement))
+                                  ;; Back up over the argument list to the
+                                  ;; function name, then grab it.
+                                  (search-forward "(" nil t)
+                                  (forward-char -1)
+                                  (forward-sexp -1)
+                                  (buffer-substring
+                                   (point)
+                                   (progn (forward-sexp 1) (point)))))))))
+        (log-file (svn-log-message-file)))
+    (find-file log-file)
+    (goto-char (point-min))
+    ;; Strip text properties from strings
+    (set-text-properties 0 (length this-file) nil this-file)
+    (set-text-properties 0 (length this-defun) nil this-defun)
+    ;; If log message for defun already in progress, add to it
+    (if (and
+         this-defun                        ;; we have a defun to work with
+         (search-forward this-defun nil t) ;; it's in the log msg already
+         (save-excursion                   ;; and it's about the same file
+           (save-match-data
+             (if (re-search-backward  ; Ick, I want a real filename regexp!
+                  "^\\*\\s-+\\([a-zA-Z0-9-_.@=+^$/%!?(){}<>]+\\)" nil t)
+                 (string-equal (match-string 1) this-file)
+               t))))
+        ;; Position point just after the "(defun):" marker for appending.
+        (if (re-search-forward ":" nil t)
+            (if (looking-at " ") (forward-char 1)))
+      ;; Else no log message for this defun in progress...
+      (goto-char (point-min))
+      ;; But if log message for file already in progress, add to it.
+      (if (search-forward this-file nil t)
+          (progn
+            ;; Push both the dressed and the raw defun name, so yank and
+            ;; yank-pop offer both forms.
+            (if this-defun (progn
+                             (kill-new (format "(%s): " this-defun))
+                             (kill-new this-defun)))
+            (search-forward ")" nil t)
+            (if (looking-at " ") (forward-char 1)))
+        ;; Found neither defun nor its file, so create new entry.
+        (goto-char (point-max))
+        (if (not (bolp)) (insert "\n"))
+        (insert (format "\n* %s (%s): " this-file (or this-defun "")))
+        ;; Finally, if no derived defun, put point where the user can
+        ;; type it themselves.
+        (if (not this-defun) (forward-char -3))))))
+
+
+
+;;; Log message helpers.
+
+;; Constants describing the textual structure of `svn log' output,
+;; used by `svn-narrow-to-log-msg' below.
+(defconst svn-log-msg-sep-line
+  "------------------------------------------------------------------------"
+  "The line of dashes that separates log messages in 'svn log' output.")
+
+(defconst svn-log-msg-boundary-regexp
+  (concat "^" svn-log-msg-sep-line "\n" "r[0-9]+ | ")
+  "Regular expression matching the start of a log msg.  The start is
+the beginning of the separator line, not the rev/author/date line that
+follows the separator line.")
+
+(defun svn-narrow-to-log-msg ()
+  "Narrow to the current Subversion log message.
+This is meant to be used while browsing the output of 'svn log'.
+If point is not in such output, error."
+  (interactive)
+  (let ((start nil) (end nil))
+    (save-excursion
+      ;; Find the separator line that opens this log entry.
+      (re-search-backward svn-log-msg-boundary-regexp)
+      (forward-line 1)
+      (setq start (point))
+      (end-of-line)
+      ;; The rev header ends with \"| N lines\"; pull out N so we can
+      ;; skip exactly that many body lines.
+      (re-search-backward "| \\([0-9]+\\) ")
+      (let ((num (match-string 1)))
+        ;; Skip the blank line after the header, then the message body.
+        (re-search-forward "^\n")
+        (forward-line (string-to-number num)))
+      (setq end (point)))
+    (narrow-to-region start end)))
+
+
+
+(message "loaded svn-dev.el")
diff --git a/tools/dev/svn-dev.vim b/tools/dev/svn-dev.vim
new file mode 100644
index 0000000..cf2c50d
--- /dev/null
+++ b/tools/dev/svn-dev.vim
@@ -0,0 +1,76 @@
+" This file sets vim up to use subversion's coding style. It can be applied on
+" a per buffer basis with :source svn-dev.vim, or can be sourced from ~/.vimrc to
+" apply settings to all files vim uses. For other variation try :help autocmd.
+"
+" Licensed to the Apache Software Foundation (ASF) under one
+" or more contributor license agreements. See the NOTICE file
+" distributed with this work for additional information
+" regarding copyright ownership. The ASF licenses this file
+" to you under the Apache License, Version 2.0 (the
+" "License"); you may not use this file except in compliance
+" with the License. You may obtain a copy of the License at
+"
+" http://www.apache.org/licenses/LICENSE-2.0
+"
+" Unless required by applicable law or agreed to in writing,
+" software distributed under the License is distributed on an
+" "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+" KIND, either express or implied. See the License for the
+" specific language governing permissions and limitations
+" under the License.
+"
+" TODO: Try to find a way to wrap comments without putting a * on the next line,
+" since most of subversion doesn't use that style. (Note that taking cro out of
+" formatoptions won't quite work, because then comments won't be wrapped by
+" default).
+"
+" Expand tab characters to spaces
+set expandtab
+
+" Tab key moves 8 spaces
+set tabstop=8
+
+" '>>' moves 4 spaces
+set shiftwidth=4
+
+" Wrap lines at 78 columns.
+" 78 so that vim won't swap over to the right before it wraps a line.
+set textwidth=78
+
+" What counts as part of a word (used for tag matching, and motion commands)
+set iskeyword=a-z,A-Z,48-57,_,.,-,>
+
+" How to wrap lines
+" t=wrap lines, c=wrap comments, inserting comment leader, r=insert comment
+" leader after an <ENTER>, o=Insert comment leader after an 'o', q=Allow
+" formatting of comments with 'gq'
+set formatoptions=tcroq
+
+" Use C style indenting
+set cindent
+
+" Use the following rules to do C style indenting
+" (Note that an s mean number*shiftwidth)
+" >=normal indent,
+" e=indent inside braces(brace at end of line),
+" n=Added to normal indent if no braces,
+" f=opening brace of function,
+" {=opening braces,
+" }=close braces (from opening),
+" ^s=indent after brace, if brace is on column 0,
+" := case labels from switch, ==statements after case,
+" t=function return type,
+" +=continuation line,
+" c=comment lines from opener,
+" (=unclosed parens (0 means match),
+" u=same as ( but for second set of parens
+"
+" Try :help cinoptions-values
+set cinoptions=>1s,e0,n-2,f0,{.5s,}0,^-.5s,=.5s,t0,+1s,c3,(0,u0,\:2
+
+" The following modelines can also be used to set the same options.
+"/*
+" * vim:ts=8:sw=4:expandtab:tw=78:fo=tcroq cindent
+" * vim:isk=a-z,A-Z,48-57,_,.,-,>
+" * vim:cino=>1s,e0,n-2,f0,{.5s,}0,^-.5s,=.5s,t0,+1s,c3,(0,u0,\:2
+" */
diff --git a/tools/dev/svn-entries.el b/tools/dev/svn-entries.el
new file mode 100644
index 0000000..fff322a
--- /dev/null
+++ b/tools/dev/svn-entries.el
@@ -0,0 +1,156 @@
+;;; svn-entries.el --- Display .svn/entries field names to the left
+
+;; Copyright (C) 2007 David Glasser
+
+;; Licensed under the same license as Subversion.
+
+;; Licensed to the Apache Software Foundation (ASF) under one
+;; or more contributor license agreements. See the NOTICE file
+;; distributed with this work for additional information
+;; regarding copyright ownership. The ASF licenses this file
+;; to you under the Apache License, Version 2.0 (the
+;; "License"); you may not use this file except in compliance
+;; with the License. You may obtain a copy of the License at
+;;
+;; http://www.apache.org/licenses/LICENSE-2.0
+;;
+;; Unless required by applicable law or agreed to in writing,
+;; software distributed under the License is distributed on an
+;; "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+;; KIND, either express or implied. See the License for the
+;; specific language governing permissions and limitations
+;; under the License.
+
+;;; Commentary:
+
+;; Display field names to the left of the lines in a .svn/entries
+;; buffer. Copy svn-entries.el to your load-path and add to your
+;; .emacs:
+
+;; (require 'svn-entries)
+
+;; After opening or editing an entries file, run
+
+;; M-x svn-entries-show
+
+;; To hide:
+
+;; M-x svn-entries-hide
+
+;; (I tried doing this as a minor mode but setting margins during
+;; alist initialization didn't work...)
+
+;; Tested on FSF Emacs 22.
+
+
+(defvar svn-entries-overlays nil "Overlays used in this buffer.")
+(make-variable-buffer-local 'svn-entries-overlays)
+
+(defgroup svn-entries nil
+ "Show labels to the left of .svn/entries buffers"
+ :group 'convenience)
+
+(defface svn-entries
+ '((t :inherit shadow))
+ "Face for displaying line numbers in the display margin."
+ :group 'svn-entries)
+
+(defun svn-entries-set-margins (buf margin)
+  "Set the left margin to MARGIN columns in every window showing BUF."
+  (dolist (w (get-buffer-window-list buf nil t))
+    (set-window-margins w margin)))
+
+(defun svn-entries-hide ()
+  "Delete all overlays displaying labels for this buffer."
+  (interactive)
+  (mapc #'delete-overlay svn-entries-overlays)
+  (setq svn-entries-overlays nil)
+  ;; Collapse the margin and stop refreshing it on window changes.
+  (svn-entries-set-margins (current-buffer) 0)
+  (remove-hook 'window-configuration-change-hook
+               'svn-entries-after-config t))
+
+(defun svn-entries-show ()
+  "Update labels for the current buffer."
+  (interactive)
+  (svn-entries-update (current-buffer))
+  ;; Keep the margins correct when windows are re-arranged (buffer-local hook).
+  (add-hook 'window-configuration-change-hook
+            'svn-entries-after-config nil t))
+
+(defconst svn-entries-labels
+  ["name"
+   "kind"
+   "revision"
+   "url"
+   "repos"
+   "schedule"
+   "text-time"
+   "checksum"
+   "committed-date"
+   "committed-rev"
+   "last-author"
+   "has-props"
+   "has-prop-mods"
+   "cachable-props"
+   "present-props"
+   "conflict-old"
+   "conflict-new"
+   "conflict-wrk"
+   "prop-reject-file"
+   "copied"
+   "copyfrom-url"
+   "copyfrom-rev"
+   "deleted"
+   "absent"
+   "incomplete"
+   "uuid"
+   "lock-token"
+   "lock-owner"
+   "lock-comment"
+   "lock-creation-date"
+   "changelist"
+   "keep-local"
+   "working-size"
+   "depth"]
+  "Field names of a .svn/entries record, in the order they appear on disk.")
+
+(defconst svn-entries-margin-width (length "lock-creation-date"))
+
+(defun svn-entries-update (buffer)
+  "Update labels for all windows displaying BUFFER."
+  (with-current-buffer buffer
+    ;; Drop any stale overlays (and margins) before re-labelling.
+    (svn-entries-hide)
+    (save-excursion
+      (save-restriction
+        (widen)
+        (let ((last-line (line-number-at-pos (point-max))) ;; NOTE(review): unused
+              (field 0)
+              (done nil))
+          (goto-char (point-min))
+          (while (not done)
+            (cond ((= (point) 1)
+                   ;; The very first line of an entries file is the format line.
+                   (svn-entries-overlay-here "format"))
+                  ((= (following-char) 12) ; ^L
+                   ;; A form-feed separates entries; restart the field count.
+                   (setq field 0))
+                  ((not (eobp))
+                   (svn-entries-overlay-here (elt svn-entries-labels field))
+                   (setq field (1+ field))))
+            (setq done (> (forward-line) 0))))))
+    (svn-entries-set-margins buffer svn-entries-margin-width)))
+
+(defun svn-entries-overlay-here (label)
+  "Put an overlay at point that shows LABEL in the window's left margin."
+  (let* ((fmt-label (propertize label 'face 'svn-entries))
+         ;; A display property on a stub string places the text in the margin.
+         (left-label (propertize " " 'display `((margin left-margin)
+                                                ,fmt-label)))
+         (ov (make-overlay (point) (point))))
+    ;; Track the overlay so svn-entries-hide can remove it later.
+    (push ov svn-entries-overlays)
+    (overlay-put ov 'before-string left-label)))
+
+(defun svn-entries-after-config ()
+  "Window-configuration hook: restore label margins where needed.
+Walks all visible windows and re-applies the margin in any window whose
+buffer has svn-entries overlays."
+  ;; Bug fix: pass W to `window-buffer'.  The original called
+  ;; (window-buffer) with no argument, which returns the selected
+  ;; window's buffer for every window walked.
+  (walk-windows (lambda (w) (svn-entries-set-margins-if-overlaid (window-buffer w)))
+                nil 'visible))
+
+(defun svn-entries-set-margins-if-overlaid (b)
+  "Re-apply the label margin for buffer B, but only if B has our overlays."
+  (with-current-buffer b
+    (when svn-entries-overlays
+      (svn-entries-set-margins b svn-entries-margin-width))))
+
+(provide 'svn-entries)
+;;; svn-entries.el ends here
diff --git a/tools/dev/svn-merge-revs.py b/tools/dev/svn-merge-revs.py
new file mode 100755
index 0000000..f67dae4
--- /dev/null
+++ b/tools/dev/svn-merge-revs.py
@@ -0,0 +1,122 @@
+#!/usr/bin/env python
+#
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements. See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership. The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied. See the License for the
+# specific language governing permissions and limitations
+# under the License.
+#
+#
+
+import sys
+import os
+
+progname = os.path.basename(sys.argv[0])
+
+def usage():
+ print("Usage: %s SOURCEURL WCPATH [r]REVNUM[,] [...]" % progname)
+ print("Try '%s --help' for more information" % progname)
+
+def help():
+  """Print the full help text with usage examples.
+
+  NOTE: intentionally shadows the 'help' builtin; kept for
+  backward compatibility with the --help dispatch below.
+  """
+  val = """This script is meant to ease the pain of merging and
+reviewing revision(s) on a release branch (although it can be used to
+merge and review revisions from any line of development to another).
+
+To allow cutting and pasting from the STATUS file, revision numbers
+can be space or comma-separated, and may also include the prefix
+'r'.
+
+Lastly, a file (named 'rev1-rev2-rev3.log') is created for you.
+This file contains each merge command that was run, the log of the
+revision that was merged, and the diff from the previous revision.
+
+Examples:
+
+  %s http://svn.apache.org/repos/asf/subversion/trunk svn-1.2.x-branch \
+  r14041, r14149, r14186, r14194, r14238, r14273
+
+  %s http://svn.apache.org/repos/asf/subversion/trunk svn-1.2.x-branch \
+  14041 14149 14186 14194 14238 14273""" % (progname, progname)
+  print(val)
+
+
+# --help takes precedence over everything else.
+if len(sys.argv) > 1 and sys.argv[1] == '--help':
+  help()
+  sys.exit(0)
+
+# Need at least SOURCEURL, WCPATH and one revision argument.
+if len(sys.argv) < 4:
+  usage()
+  sys.exit(255)
+
+src_url = sys.argv[1]
+wc_path = sys.argv[2]
+
+# Tolerate comma separated lists of revs (e.g. "r234, r245, r251")
+revs = []
+for rev in sys.argv[3:]:
+  orig_rev = rev
+  # Strip a trailing comma and a leading 'r', as pasted from STATUS.
+  if rev[-1:] == ',':
+    rev = rev[:-1]
+
+  if rev[:1] == 'r':
+    rev = rev[1:]
+
+  try:
+    rev = int(rev)
+  except ValueError:
+    print("Encountered non integer revision '%s'" % orig_rev)
+    usage()
+    sys.exit(254)
+  revs.append(rev)
+
+# Make an easily reviewable logfile
+logfile = "-".join([str(x) for x in revs]) + ".log"
+log = open(logfile, 'w')
+
+for rev in revs:
+ merge_cmd = ("svn merge -r%i:%i %s %s" % (rev - 1, rev, src_url, wc_path))
+ log_cmd = 'svn log -v -r%i %s' % (rev, src_url)
+ diff_cmd = 'svn diff -r%i:%i %s' % (rev -1, rev, src_url)
+
+ # Do the merge
+ os.system(merge_cmd)
+
+ # Write our header
+ log.write("=" * 72 + '\n')
+ log.write(merge_cmd + '\n')
+
+ # Get our log
+ fh = os.popen(log_cmd)
+ while True:
+ line = fh.readline()
+ if not line:
+ break
+ log.write(line)
+ fh.close()
+
+ # Get our diff
+ fh = os.popen(diff_cmd)
+ while True:
+ line = fh.readline()
+ if not line:
+ break
+ log.write(line)
+
+ # Write our footer
+ log.write("=" * 72 + '\n' * 10)
+
+
+log.close()
+print("\nYour logfile is '%s'" % logfile)
diff --git a/tools/dev/svnmover/linenoise/LICENSE b/tools/dev/svnmover/linenoise/LICENSE
new file mode 100644
index 0000000..18e8148
--- /dev/null
+++ b/tools/dev/svnmover/linenoise/LICENSE
@@ -0,0 +1,25 @@
+Copyright (c) 2010-2014, Salvatore Sanfilippo <antirez at gmail dot com>
+Copyright (c) 2010-2013, Pieter Noordhuis <pcnoordhuis at gmail dot com>
+
+All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are met:
+
+* Redistributions of source code must retain the above copyright notice,
+ this list of conditions and the following disclaimer.
+
+* Redistributions in binary form must reproduce the above copyright notice,
+ this list of conditions and the following disclaimer in the documentation
+ and/or other materials provided with the distribution.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
+ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
+ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
+ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
diff --git a/tools/dev/svnmover/linenoise/README.markdown b/tools/dev/svnmover/linenoise/README.markdown
new file mode 100644
index 0000000..c845673
--- /dev/null
+++ b/tools/dev/svnmover/linenoise/README.markdown
@@ -0,0 +1,52 @@
+# Linenoise
+
+A minimal, zero-config, BSD licensed, readline replacement used in Redis,
+MongoDB, and Android.
+
+* Single and multi line editing mode with the usual key bindings implemented.
+* History handling.
+* Completion.
+* About 1,100 lines of BSD license source code.
+* Only uses a subset of VT100 escapes (ANSI.SYS compatible).
+
+## Can a line editing library be 20k lines of code?
+
+Line editing with some support for history is a really important feature for command line utilities. Instead of retyping almost the same stuff again and again it's just much better to hit the up arrow and edit on syntax errors, or in order to try a slightly different command. But apparently code dealing with terminals is some sort of Black Magic: readline is 30k lines of code, libedit 20k. Is it reasonable to link small utilities to huge libraries just to get a minimal support for line editing?
+
+So what usually happens is either:
+
+ * Large programs with configure scripts disabling line editing if readline is not present in the system, or not supporting it at all since readline is GPL licensed and libedit (the BSD clone) is not as known and available as readline is (Real world example of this problem: Tclsh).
+ * Smaller programs not using a configure script not supporting line editing at all (A problem we had with Redis-cli for instance).
+
+The result is a pollution of binaries without line editing support.
+
+So I spent more or less two hours doing a reality check resulting in this little library: is it *really* needed for a line editing library to be 20k lines of code? Apparently not, it is possible to get a very small, zero configuration, trivial to embed library, that solves the problem. Smaller programs will just include this, supporting line editing out of the box. Larger programs may use this little library or just check with configure if readline/libedit is available and resort to linenoise if not.
+
+## Terminals, in 2010.
+
+Apparently almost every terminal you can happen to use today has some kind of support for basic VT100 escape sequences. So I tried to write a lib using just very basic VT100 features. The resulting library appears to work everywhere I tried to use it, and now can work even on ANSI.SYS compatible terminals, since no
+VT220 specific sequences are used anymore.
+
+The library is currently about 1100 lines of code. In order to use it in your project just look at the *example.c* file in the source distribution, it is trivial. Linenoise is BSD code, so you can use both in free software and commercial software.
+
+## Tested with...
+
+ * Linux text only console ($TERM = linux)
+ * Linux KDE terminal application ($TERM = xterm)
+ * Linux xterm ($TERM = xterm)
+ * Linux Buildroot ($TERM = vt100)
+ * Mac OS X iTerm ($TERM = xterm)
+ * Mac OS X default Terminal.app ($TERM = xterm)
+ * OpenBSD 4.5 through an OSX Terminal.app ($TERM = screen)
+ * IBM AIX 6.1
+ * FreeBSD xterm ($TERM = xterm)
+ * ANSI.SYS
+
+Please test it everywhere you can and report back!
+
+## Let's push this forward!
+
+Patches should be provided in the respect of linenoise sensibility for small
+easy to understand code.
+
+Send feedback to antirez at gmail
diff --git a/tools/dev/svnmover/linenoise/linenoise.c b/tools/dev/svnmover/linenoise/linenoise.c
new file mode 100644
index 0000000..058f68e
--- /dev/null
+++ b/tools/dev/svnmover/linenoise/linenoise.c
@@ -0,0 +1,1112 @@
+/* linenoise.c -- guerrilla line editing library against the idea that a
+ * line editing lib needs to be 20,000 lines of C code.
+ *
+ * You can find the latest source code at:
+ *
+ * http://github.com/antirez/linenoise
+ *
+ * Does a number of crazy assumptions that happen to be true in 99.9999% of
+ * the 2010 UNIX computers around.
+ *
+ * ------------------------------------------------------------------------
+ *
+ * Copyright (c) 2010-2014, Salvatore Sanfilippo <antirez at gmail dot com>
+ * Copyright (c) 2010-2013, Pieter Noordhuis <pcnoordhuis at gmail dot com>
+ *
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ *
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ * ------------------------------------------------------------------------
+ *
+ * References:
+ * - http://invisible-island.net/xterm/ctlseqs/ctlseqs.html
+ * - http://www.3waylabs.com/nw/WWW/products/wizcon/vt220.html
+ *
+ * Todo list:
+ * - Filter bogus Ctrl+<char> combinations.
+ * - Win32 support
+ *
+ * Bloat:
+ * - History search like Ctrl+r in readline?
+ *
+ * List of escape sequences used by this program, we do everything just
+ * with three sequences. In order to be so cheap we may have some
+ * flickering effect with some slow terminal, but the lesser sequences
+ * the more compatible.
+ *
+ * EL (Erase Line)
+ * Sequence: ESC [ n K
+ * Effect: if n is 0 or missing, clear from cursor to end of line
+ * Effect: if n is 1, clear from beginning of line to cursor
+ * Effect: if n is 2, clear entire line
+ *
+ * CUF (CUrsor Forward)
+ * Sequence: ESC [ n C
+ * Effect: moves cursor forward n chars
+ *
+ * CUB (CUrsor Backward)
+ * Sequence: ESC [ n D
+ * Effect: moves cursor backward n chars
+ *
+ * The following is used to get the terminal width if getting
+ * the width with the TIOCGWINSZ ioctl fails
+ *
+ * DSR (Device Status Report)
+ * Sequence: ESC [ 6 n
+ * Effect: reports the current cursor position as ESC [ n ; m R
+ * where n is the row and m is the column
+ *
+ * When multi line mode is enabled, we also use an additional escape
+ * sequence. However multi line editing is disabled by default.
+ *
+ * CUU (Cursor Up)
+ * Sequence: ESC [ n A
+ * Effect: moves cursor up of n chars.
+ *
+ * CUD (Cursor Down)
+ * Sequence: ESC [ n B
+ * Effect: moves cursor down of n chars.
+ *
+ * When linenoiseClearScreen() is called, two additional escape sequences
+ * are used in order to clear the screen and position the cursor at home
+ * position.
+ *
+ * CUP (Cursor position)
+ * Sequence: ESC [ H
+ * Effect: moves the cursor to upper left corner
+ *
+ * ED (Erase display)
+ * Sequence: ESC [ 2 J
+ * Effect: clear the whole screen
+ *
+ */
+
+/* Tell the compiler to be quiet about implicit conversions from
+ [s]size_t to int. */
+#if __GNUC__ > 4 || (__GNUC__ == 4 && __GNUC_MINOR__ >= 2)
+# if defined(__APPLE_CC__) || defined(__clang__)
+# pragma GCC diagnostic ignored "-Wshorten-64-to-32"
+# endif
+#endif
+
+#include <termios.h>
+#include <unistd.h>
+#include <stdlib.h>
+#include <stdio.h>
+#include <errno.h>
+#include <string.h>
+#include <stdlib.h>
+#include <ctype.h>
+#include <sys/types.h>
+#include <sys/ioctl.h>
+#include <unistd.h>
+#include "linenoise.h"
+
+#define LINENOISE_DEFAULT_HISTORY_MAX_LEN 100
+#define LINENOISE_MAX_LINE 4096
+static const char *unsupported_term[] = {"dumb","cons25","emacs",NULL};
+static linenoiseCompletionCallback *completionCallback = NULL;
+
+static struct termios orig_termios; /* In order to restore at exit.*/
+static int rawmode = 0; /* For atexit() function to check if restore is needed*/
+static int mlmode = 0; /* Multi line mode. Default is single line. */
+static int atexit_registered = 0; /* Register atexit just 1 time. */
+static int history_max_len = LINENOISE_DEFAULT_HISTORY_MAX_LEN;
+static int history_len = 0;
+static char **history = NULL;
+
+/* The linenoiseState structure represents the state during line editing.
+ * We pass this state to functions implementing specific editing
+ * functionalities. */
+struct linenoiseState {
+ int ifd; /* Terminal stdin file descriptor. */
+ int ofd; /* Terminal stdout file descriptor. */
+ char *buf; /* Edited line buffer. */
+ size_t buflen; /* Edited line buffer size. */
+ const char *prompt; /* Prompt to display. */
+ size_t plen; /* Prompt length. */
+ size_t pos; /* Current cursor position. */
+ size_t oldpos; /* Previous refresh cursor position. */
+ size_t len; /* Current edited line length. */
+ size_t cols; /* Number of columns in terminal. */
+ size_t maxrows; /* Maximum num of rows used so far (multiline mode) */
+ int history_index; /* The history index we are currently editing. */
+};
+
+enum KEY_ACTION{
+ KEY_NULL = 0, /* NULL */
+ CTRL_A = 1, /* Ctrl+a */
+ CTRL_B = 2, /* Ctrl-b */
+ CTRL_C = 3, /* Ctrl-c */
+ CTRL_D = 4, /* Ctrl-d */
+ CTRL_E = 5, /* Ctrl-e */
+ CTRL_F = 6, /* Ctrl-f */
+ CTRL_H = 8, /* Ctrl-h */
+ TAB = 9, /* Tab */
+ CTRL_K = 11, /* Ctrl+k */
+ CTRL_L = 12, /* Ctrl+l */
+ ENTER = 13, /* Enter */
+ CTRL_N = 14, /* Ctrl-n */
+ CTRL_P = 16, /* Ctrl-p */
+ CTRL_T = 20, /* Ctrl-t */
+ CTRL_U = 21, /* Ctrl+u */
+ CTRL_W = 23, /* Ctrl+w */
+ ESC = 27, /* Escape */
+ BACKSPACE = 127 /* Backspace */
+};
+
+static void linenoiseAtExit(void);
+static void refreshLine(struct linenoiseState *l);
+
+/* Debugging macro. */
+#if 0
+FILE *lndebug_fp = NULL;
+#define lndebug(...) \
+ do { \
+ if (lndebug_fp == NULL) { \
+ lndebug_fp = fopen("/tmp/lndebug.txt","a"); \
+ fprintf(lndebug_fp, \
+ "[%d %d %d] p: %d, rows: %d, rpos: %d, max: %d, oldmax: %d\n", \
+ (int)l->len,(int)l->pos,(int)l->oldpos,plen,rows,rpos, \
+ (int)l->maxrows,old_rows); \
+ } \
+ fprintf(lndebug_fp, ", " __VA_ARGS__); \
+ fflush(lndebug_fp); \
+ } while (0)
+#else
+static void lndebug(const char *fmt, ...) { }
+#endif
+
+/* ======================= Low level terminal handling ====================== */
+
+/* Enable (ml != 0) or disable multi-line editing mode.
+ * The default is single-line mode. */
+void linenoiseSetMultiLine(int ml) {
+    mlmode = ml;
+}
+
+/* Return true if the terminal name is in the list of terminals we know are
+ * not able to understand basic escape sequences.  An unset TERM is treated
+ * as supported (returns 0). */
+static int isUnsupportedTerm(void) {
+    char *term = getenv("TERM");
+    int j;
+
+    if (term == NULL) return 0;
+    /* Case-insensitive scan of the NULL-terminated blacklist. */
+    for (j = 0; unsupported_term[j]; j++)
+        if (!strcasecmp(term,unsupported_term[j])) return 1;
+    return 0;
+}
+
+/* Raw mode: 1960 magic shit.
+ * Saves the current termios settings in orig_termios, then switches FD
+ * into raw (uncooked) mode.  Returns 0 on success, -1 with errno=ENOTTY
+ * on failure. */
+static int enableRawMode(int fd) {
+    struct termios raw;
+
+    /* NOTE(review): tests STDIN_FILENO here but configures fd below --
+     * confirm that fd is always stdin at the call sites. */
+    if (!isatty(STDIN_FILENO)) goto fatal;
+    if (!atexit_registered) {
+        /* Restore the terminal even on abnormal exit; register once. */
+        atexit(linenoiseAtExit);
+        atexit_registered = 1;
+    }
+    if (tcgetattr(fd,&orig_termios) == -1) goto fatal;
+
+    raw = orig_termios;  /* modify the original mode */
+    /* input modes: no break, no CR to NL, no parity check, no strip char,
+     * no start/stop output control. */
+    raw.c_iflag &= ~(BRKINT | ICRNL | INPCK | ISTRIP | IXON);
+    /* output modes - disable post processing */
+    raw.c_oflag &= ~(OPOST);
+    /* control modes - set 8 bit chars */
+    raw.c_cflag |= (CS8);
+    /* local modes - echoing off, canonical off, no extended functions,
+     * no signal chars (^Z,^C) */
+    raw.c_lflag &= ~(ECHO | ICANON | IEXTEN | ISIG);
+    /* control chars - set return condition: min number of bytes and timer.
+     * We want read to return every single byte, without timeout. */
+    raw.c_cc[VMIN] = 1; raw.c_cc[VTIME] = 0; /* 1 byte, no timer */
+
+    /* put terminal in raw mode after flushing */
+    if (tcsetattr(fd,TCSAFLUSH,&raw) < 0) goto fatal;
+    rawmode = 1;
+    return 0;
+
+fatal:
+    errno = ENOTTY;
+    return -1;
+}
+
+/* Restore the termios settings saved by enableRawMode().  Safe to call
+ * when raw mode was never enabled (rawmode flag guards it). */
+static void disableRawMode(int fd) {
+    /* Don't even check the return value as it's too late. */
+    if (rawmode && tcsetattr(fd,TCSAFLUSH,&orig_termios) != -1)
+        rawmode = 0;
+}
+
+/* Use the ESC [6n escape sequence to query the horizontal cursor position
+ * and return it. On error -1 is returned, on success the position of the
+ * cursor. */
+static int getCursorPosition(int ifd, int ofd) {
+    char buf[32];
+    int cols, rows;
+    unsigned int i = 0;
+
+    /* Report cursor location */
+    if (write(ofd, "\x1b[6n", 4) != 4) return -1;
+
+    /* Read the response: ESC [ rows ; cols R */
+    while (i < sizeof(buf)-1) {
+        if (read(ifd,buf+i,1) != 1) break;
+        if (buf[i] == 'R') break;  /* 'R' terminates the report */
+        i++;
+    }
+    buf[i] = '\0';
+
+    /* Parse it.  Both prefix bytes must match before trusting sscanf. */
+    if (buf[0] != ESC || buf[1] != '[') return -1;
+    if (sscanf(buf+2,"%d;%d",&rows,&cols) != 2) return -1;
+    return cols;
+}
+
+/* Try to get the number of columns in the current terminal, or assume 80
+ * if it fails. */
+static int getColumns(int ifd, int ofd) {
+    struct winsize ws;
+
+    /* NOTE(review): queries fd 1 (stdout) rather than ofd -- confirm
+     * whether ofd can ever differ from stdout at the call sites. */
+    if (ioctl(1, TIOCGWINSZ, &ws) == -1 || ws.ws_col == 0) {
+        /* ioctl() failed. Try to query the terminal itself. */
+        int start, cols;
+
+        /* Get the initial position so we can restore it later. */
+        start = getCursorPosition(ifd,ofd);
+        if (start == -1) goto failed;
+
+        /* Go to right margin and get position. */
+        if (write(ofd,"\x1b[999C",6) != 6) goto failed;
+        cols = getCursorPosition(ifd,ofd);
+        if (cols == -1) goto failed;
+
+        /* Restore position by moving the cursor back to where it was. */
+        if (cols > start) {
+            char seq[32];
+            snprintf(seq,32,"\x1b[%dD",cols-start);
+            if (write(ofd,seq,strlen(seq)) == -1) {
+                /* Can't recover... */
+            }
+        }
+        return cols;
+    } else {
+        return ws.ws_col;
+    }
+
+failed:
+    /* Conservative fallback: a classic 80-column terminal. */
+    return 80;
+}
+
+/* Clear the screen. Used to handle ctrl+l.
+ * Emits CUP (cursor home) followed by ED 2 (erase whole display). */
+void linenoiseClearScreen(void) {
+    if (write(STDOUT_FILENO,"\x1b[H\x1b[2J",7) <= 0) {
+        /* nothing to do, just to avoid warning. */
+    }
+}
+
+/* Beep, used for completion when there is nothing to complete or when all
+ * the choices were already shown.  Writes BEL (0x07) to stderr. */
+static void linenoiseBeep(void) {
+    fprintf(stderr, "\x7");
+    fflush(stderr);
+}
+
+/* ============================== Completion ================================ */
+
+/* Release every completion string and the vector that holds them. */
+static void freeCompletions(linenoiseCompletions *lc) {
+    size_t i;
+
+    for (i = 0; i < lc->len; i++)
+        free(lc->cvec[i]);
+    /* free(NULL) is defined as a no-op, so no guard is required. */
+    free(lc->cvec);
+}
+
+/* This is an helper function for linenoiseEdit() and is called when the
+ * user types the <tab> key in order to complete the string currently in the
+ * input.
+ *
+ * The state of the editing is encapsulated into the pointed linenoiseState
+ * structure as described in the structure definition.
+ *
+ * Returns the last character read (so the caller can process it), or -1
+ * on read error.  Repeated <tab> cycles through the candidates; <esc>
+ * restores the original buffer; any other key accepts the shown candidate. */
+static int completeLine(struct linenoiseState *ls) {
+    linenoiseCompletions lc = { 0, NULL };
+    int nread, nwritten;
+    char c = 0;
+
+    completionCallback(ls->buf,&lc);
+    if (lc.len == 0) {
+        /* Nothing to complete. */
+        linenoiseBeep();
+    } else {
+        size_t stop = 0, i = 0;
+
+        while(!stop) {
+            /* Show completion or original buffer */
+            if (i < lc.len) {
+                /* Temporarily point the state at the candidate string,
+                 * refresh, then restore -- the edit buffer is untouched. */
+                struct linenoiseState saved = *ls;
+
+                ls->len = ls->pos = strlen(lc.cvec[i]);
+                ls->buf = lc.cvec[i];
+                refreshLine(ls);
+                ls->len = saved.len;
+                ls->pos = saved.pos;
+                ls->buf = saved.buf;
+            } else {
+                refreshLine(ls);
+            }
+
+            nread = read(ls->ifd,&c,1);
+            if (nread <= 0) {
+                freeCompletions(&lc);
+                return -1;
+            }
+
+            switch(c) {
+                case 9: /* tab */
+                    /* Cycle; index lc.len means "show original buffer". */
+                    i = (i+1) % (lc.len+1);
+                    if (i == lc.len) linenoiseBeep();
+                    break;
+                case 27: /* escape */
+                    /* Re-show original buffer */
+                    if (i < lc.len) refreshLine(ls);
+                    stop = 1;
+                    break;
+                default:
+                    /* Update buffer and return */
+                    if (i < lc.len) {
+                        nwritten = snprintf(ls->buf,ls->buflen,"%s",lc.cvec[i]);
+                        ls->len = ls->pos = nwritten;
+                    }
+                    stop = 1;
+                    break;
+            }
+        }
+    }
+
+    freeCompletions(&lc);
+    return c; /* Return last read character */
+}
+
+/* Register a callback function to be called for tab-completion. */
+void linenoiseSetCompletionCallback(linenoiseCompletionCallback *fn) {
+ completionCallback = fn;
+}
+
+/* This function is used by the callback function registered by the user
+ * in order to add completion options given the input string when the
+ * user typed <tab>. See the example.c source code for a very easy to
+ * understand example. */
+void linenoiseAddCompletion(linenoiseCompletions *lc, const char *str) {
+ size_t len = strlen(str);
+ char *copy, **cvec;
+
+ copy = malloc(len+1);
+ if (copy == NULL) return;
+ memcpy(copy,str,len+1);
+ cvec = realloc(lc->cvec,sizeof(char*)*(lc->len+1));
+ if (cvec == NULL) {
+ free(copy);
+ return;
+ }
+ lc->cvec = cvec;
+ lc->cvec[lc->len++] = copy;
+}
+
+/* =========================== Line editing ================================= */
+
+/* We define a very simple "append buffer" structure, that is an heap
+ * allocated string where we can append to. This is useful in order to
+ * write all the escape sequences in a buffer and flush them to the standard
+ * output in a single call, to avoid flickering effects. */
+struct abuf {
+ char *b;
+ int len;
+};
+
+static void abInit(struct abuf *ab) {
+ ab->b = NULL;
+ ab->len = 0;
+}
+
+static void abAppend(struct abuf *ab, const char *s, int len) {
+ char *new = realloc(ab->b,ab->len+len);
+
+ if (new == NULL) return;
+ memcpy(new+ab->len,s,len);
+ ab->b = new;
+ ab->len += len;
+}
+
+static void abFree(struct abuf *ab) {
+ free(ab->b);
+}
+
+/* Single line low level line refresh.
+ *
+ * Rewrite the currently edited line accordingly to the buffer content,
+ * cursor position, and number of columns of the terminal. */
+static void refreshSingleLine(struct linenoiseState *l) {
+ char seq[64];
+ size_t plen = strlen(l->prompt);
+ int fd = l->ofd;
+ char *buf = l->buf;
+ size_t len = l->len;
+ size_t pos = l->pos;
+ struct abuf ab;
+
+ while((plen+pos) >= l->cols) {
+ buf++;
+ len--;
+ pos--;
+ }
+ while (plen+len > l->cols) {
+ len--;
+ }
+
+ abInit(&ab);
+ /* Cursor to left edge */
+ snprintf(seq,64,"\r");
+ abAppend(&ab,seq,strlen(seq));
+ /* Write the prompt and the current buffer content */
+ abAppend(&ab,l->prompt,strlen(l->prompt));
+ abAppend(&ab,buf,len);
+ /* Erase to right */
+ snprintf(seq,64,"\x1b[0K");
+ abAppend(&ab,seq,strlen(seq));
+ /* Move cursor to original position. */
+ snprintf(seq,64,"\r\x1b[%dC", (int)(pos+plen));
+ abAppend(&ab,seq,strlen(seq));
+ if (write(fd,ab.b,ab.len) == -1) {} /* Can't recover from write error. */
+ abFree(&ab);
+}
+
+/* Multi line low level line refresh.
+ *
+ * Rewrite the currently edited line accordingly to the buffer content,
+ * cursor position, and number of columns of the terminal. */
+static void refreshMultiLine(struct linenoiseState *l) {
+ char seq[64];
+ int plen = strlen(l->prompt);
+ int rows = (plen+l->len+l->cols-1)/l->cols; /* rows used by current buf. */
+ int rpos = (plen+l->oldpos+l->cols)/l->cols; /* cursor relative row. */
+ int rpos2; /* rpos after refresh. */
+ int col; /* colum position, zero-based. */
+ int old_rows = l->maxrows;
+ int fd = l->ofd, j;
+ struct abuf ab;
+
+ /* Update maxrows if needed. */
+ if (rows > (int)l->maxrows) l->maxrows = rows;
+
+ /* First step: clear all the lines used before. To do so start by
+ * going to the last row. */
+ abInit(&ab);
+ if (old_rows-rpos > 0) {
+ lndebug("go down %d", old_rows-rpos);
+ snprintf(seq,64,"\x1b[%dB", old_rows-rpos);
+ abAppend(&ab,seq,strlen(seq));
+ }
+
+ /* Now for every row clear it, go up. */
+ for (j = 0; j < old_rows-1; j++) {
+ lndebug("clear+up");
+ snprintf(seq,64,"\r\x1b[0K\x1b[1A");
+ abAppend(&ab,seq,strlen(seq));
+ }
+
+ /* Clean the top line. */
+ lndebug("clear");
+ snprintf(seq,64,"\r\x1b[0K");
+ abAppend(&ab,seq,strlen(seq));
+
+ /* Write the prompt and the current buffer content */
+ abAppend(&ab,l->prompt,strlen(l->prompt));
+ abAppend(&ab,l->buf,l->len);
+
+ /* If we are at the very end of the screen with our prompt, we need to
+ * emit a newline and move the prompt to the first column. */
+ if (l->pos &&
+ l->pos == l->len &&
+ (l->pos+plen) % l->cols == 0)
+ {
+ lndebug("<newline>");
+ abAppend(&ab,"\n",1);
+ snprintf(seq,64,"\r");
+ abAppend(&ab,seq,strlen(seq));
+ rows++;
+ if (rows > (int)l->maxrows) l->maxrows = rows;
+ }
+
+ /* Move cursor to right position. */
+ rpos2 = (plen+l->pos+l->cols)/l->cols; /* current cursor relative row. */
+ lndebug("rpos2 %d", rpos2);
+
+ /* Go up till we reach the expected positon. */
+ if (rows-rpos2 > 0) {
+ lndebug("go-up %d", rows-rpos2);
+ snprintf(seq,64,"\x1b[%dA", rows-rpos2);
+ abAppend(&ab,seq,strlen(seq));
+ }
+
+ /* Set column. */
+ col = (plen+(int)l->pos) % (int)l->cols;
+ lndebug("set col %d", 1+col);
+ if (col)
+ snprintf(seq,64,"\r\x1b[%dC", col);
+ else
+ snprintf(seq,64,"\r");
+ abAppend(&ab,seq,strlen(seq));
+
+ lndebug("\n");
+ l->oldpos = l->pos;
+
+ if (write(fd,ab.b,ab.len) == -1) {} /* Can't recover from write error. */
+ abFree(&ab);
+}
+
+/* Calls the two low level functions refreshSingleLine() or
+ * refreshMultiLine() according to the selected mode. */
+static void refreshLine(struct linenoiseState *l) {
+ if (mlmode)
+ refreshMultiLine(l);
+ else
+ refreshSingleLine(l);
+}
+
+/* Insert the character 'c' at cursor current position.
+ *
+ * On error writing to the terminal -1 is returned, otherwise 0. */
+static int linenoiseEditInsert(struct linenoiseState *l, char c) {
+ if (l->len < l->buflen) {
+ if (l->len == l->pos) {
+ l->buf[l->pos] = c;
+ l->pos++;
+ l->len++;
+ l->buf[l->len] = '\0';
+ if ((!mlmode && l->plen+l->len < l->cols) /* || mlmode */) {
+ /* Avoid a full update of the line in the
+ * trivial case. */
+ if (write(l->ofd,&c,1) == -1) return -1;
+ } else {
+ refreshLine(l);
+ }
+ } else {
+ memmove(l->buf+l->pos+1,l->buf+l->pos,l->len-l->pos);
+ l->buf[l->pos] = c;
+ l->len++;
+ l->pos++;
+ l->buf[l->len] = '\0';
+ refreshLine(l);
+ }
+ }
+ return 0;
+}
+
+/* Move cursor on the left. */
+static void linenoiseEditMoveLeft(struct linenoiseState *l) {
+ if (l->pos > 0) {
+ l->pos--;
+ refreshLine(l);
+ }
+}
+
+/* Move cursor on the right. */
+static void linenoiseEditMoveRight(struct linenoiseState *l) {
+ if (l->pos != l->len) {
+ l->pos++;
+ refreshLine(l);
+ }
+}
+
+/* Move cursor to the start of the line. */
+static void linenoiseEditMoveHome(struct linenoiseState *l) {
+ if (l->pos != 0) {
+ l->pos = 0;
+ refreshLine(l);
+ }
+}
+
+/* Move cursor to the end of the line. */
+static void linenoiseEditMoveEnd(struct linenoiseState *l) {
+ if (l->pos != l->len) {
+ l->pos = l->len;
+ refreshLine(l);
+ }
+}
+
+/* Substitute the currently edited line with the next or previous history
+ * entry as specified by 'dir'. */
+#define LINENOISE_HISTORY_NEXT 0
+#define LINENOISE_HISTORY_PREV 1
+static void linenoiseEditHistoryNext(struct linenoiseState *l, int dir) {
+ if (history_len > 1) {
+ /* Update the current history entry before to
+ * overwrite it with the next one. */
+ free(history[history_len - 1 - l->history_index]);
+ history[history_len - 1 - l->history_index] = strdup(l->buf);
+ /* Show the new entry */
+ l->history_index += (dir == LINENOISE_HISTORY_PREV) ? 1 : -1;
+ if (l->history_index < 0) {
+ l->history_index = 0;
+ return;
+ } else if (l->history_index >= history_len) {
+ l->history_index = history_len-1;
+ return;
+ }
+ strncpy(l->buf,history[history_len - 1 - l->history_index],l->buflen);
+ l->buf[l->buflen-1] = '\0';
+ l->len = l->pos = strlen(l->buf);
+ refreshLine(l);
+ }
+}
+
+/* Delete the character at the right of the cursor without altering the cursor
+ * position. Basically this is what happens with the "Delete" keyboard key. */
+static void linenoiseEditDelete(struct linenoiseState *l) {
+ if (l->len > 0 && l->pos < l->len) {
+ memmove(l->buf+l->pos,l->buf+l->pos+1,l->len-l->pos-1);
+ l->len--;
+ l->buf[l->len] = '\0';
+ refreshLine(l);
+ }
+}
+
+/* Backspace implementation. */
+static void linenoiseEditBackspace(struct linenoiseState *l) {
+ if (l->pos > 0 && l->len > 0) {
+ memmove(l->buf+l->pos-1,l->buf+l->pos,l->len-l->pos);
+ l->pos--;
+ l->len--;
+ l->buf[l->len] = '\0';
+ refreshLine(l);
+ }
+}
+
+/* Delete the previosu word, maintaining the cursor at the start of the
+ * current word. */
+static void linenoiseEditDeletePrevWord(struct linenoiseState *l) {
+ size_t old_pos = l->pos;
+ size_t diff;
+
+ while (l->pos > 0 && l->buf[l->pos-1] == ' ')
+ l->pos--;
+ while (l->pos > 0 && l->buf[l->pos-1] != ' ')
+ l->pos--;
+ diff = old_pos - l->pos;
+ memmove(l->buf+l->pos,l->buf+old_pos,l->len-old_pos+1);
+ l->len -= diff;
+ refreshLine(l);
+}
+
+/* This function is the core of the line editing capability of linenoise.
+ * It expects 'fd' to be already in "raw mode" so that every key pressed
+ * will be returned ASAP to read().
+ *
+ * The resulting string is put into 'buf' when the user type enter, or
+ * when ctrl+d is typed.
+ *
+ * The function returns the length of the current buffer. */
+static int linenoiseEdit(int stdin_fd, int stdout_fd, char *buf, size_t buflen, const char *prompt)
+{
+ struct linenoiseState l;
+
+ /* Populate the linenoise state that we pass to functions implementing
+ * specific editing functionalities. */
+ l.ifd = stdin_fd;
+ l.ofd = stdout_fd;
+ l.buf = buf;
+ l.buflen = buflen;
+ l.prompt = prompt;
+ l.plen = strlen(prompt);
+ l.oldpos = l.pos = 0;
+ l.len = 0;
+ l.cols = getColumns(stdin_fd, stdout_fd);
+ l.maxrows = 0;
+ l.history_index = 0;
+
+ /* Buffer starts empty. */
+ l.buf[0] = '\0';
+ l.buflen--; /* Make sure there is always space for the nulterm */
+
+ /* The latest history entry is always our current buffer, that
+ * initially is just an empty string. */
+ linenoiseHistoryAdd("");
+
+ if (write(l.ofd,prompt,l.plen) == -1) return -1;
+ while(1) {
+ char c;
+ int nread;
+ char seq[3];
+
+ nread = read(l.ifd,&c,1);
+ if (nread <= 0) return l.len;
+
+ /* Only autocomplete when the callback is set. It returns < 0 when
+ * there was an error reading from fd. Otherwise it will return the
+ * character that should be handled next. */
+ if (c == 9 && completionCallback != NULL) {
+ c = completeLine(&l);
+ /* Return on errors */
+ if (c < 0) return l.len;
+ /* Read next character when 0 */
+ if (c == 0) continue;
+ }
+
+ switch(c) {
+ case ENTER: /* enter */
+ history_len--;
+ free(history[history_len]);
+ if (mlmode) linenoiseEditMoveEnd(&l);
+ return (int)l.len;
+ case CTRL_C: /* ctrl-c */
+ errno = EAGAIN;
+ return -1;
+ case BACKSPACE: /* backspace */
+ case 8: /* ctrl-h */
+ linenoiseEditBackspace(&l);
+ break;
+ case CTRL_D: /* ctrl-d, remove char at right of cursor, or if the
+ line is empty, act as end-of-file. */
+ if (l.len > 0) {
+ linenoiseEditDelete(&l);
+ } else {
+ history_len--;
+ free(history[history_len]);
+ return -1;
+ }
+ break;
+ case CTRL_T: /* ctrl-t, swaps current character with previous. */
+ if (l.pos > 0 && l.pos < l.len) {
+ int aux = buf[l.pos-1];
+ buf[l.pos-1] = buf[l.pos];
+ buf[l.pos] = aux;
+ if (l.pos != l.len-1) l.pos++;
+ refreshLine(&l);
+ }
+ break;
+ case CTRL_B: /* ctrl-b */
+ linenoiseEditMoveLeft(&l);
+ break;
+ case CTRL_F: /* ctrl-f */
+ linenoiseEditMoveRight(&l);
+ break;
+ case CTRL_P: /* ctrl-p */
+ linenoiseEditHistoryNext(&l, LINENOISE_HISTORY_PREV);
+ break;
+ case CTRL_N: /* ctrl-n */
+ linenoiseEditHistoryNext(&l, LINENOISE_HISTORY_NEXT);
+ break;
+ case ESC: /* escape sequence */
+ /* Read the next two bytes representing the escape sequence.
+ * Use two calls to handle slow terminals returning the two
+ * chars at different times. */
+ if (read(l.ifd,seq,1) == -1) break;
+ if (read(l.ifd,seq+1,1) == -1) break;
+
+ /* ESC [ sequences. */
+ if (seq[0] == '[') {
+ if (seq[1] >= '0' && seq[1] <= '9') {
+ /* Extended escape, read additional byte. */
+ if (read(l.ifd,seq+2,1) == -1) break;
+ if (seq[2] == '~') {
+ switch(seq[1]) {
+ case '3': /* Delete key. */
+ linenoiseEditDelete(&l);
+ break;
+ }
+ }
+ } else {
+ switch(seq[1]) {
+ case 'A': /* Up */
+ linenoiseEditHistoryNext(&l, LINENOISE_HISTORY_PREV);
+ break;
+ case 'B': /* Down */
+ linenoiseEditHistoryNext(&l, LINENOISE_HISTORY_NEXT);
+ break;
+ case 'C': /* Right */
+ linenoiseEditMoveRight(&l);
+ break;
+ case 'D': /* Left */
+ linenoiseEditMoveLeft(&l);
+ break;
+ case 'H': /* Home */
+ linenoiseEditMoveHome(&l);
+ break;
+ case 'F': /* End*/
+ linenoiseEditMoveEnd(&l);
+ break;
+ }
+ }
+ }
+
+ /* ESC O sequences. */
+ else if (seq[0] == 'O') {
+ switch(seq[1]) {
+ case 'H': /* Home */
+ linenoiseEditMoveHome(&l);
+ break;
+ case 'F': /* End*/
+ linenoiseEditMoveEnd(&l);
+ break;
+ }
+ }
+ break;
+ default:
+ if (linenoiseEditInsert(&l,c)) return -1;
+ break;
+ case CTRL_U: /* Ctrl+u, delete the whole line. */
+ buf[0] = '\0';
+ l.pos = l.len = 0;
+ refreshLine(&l);
+ break;
+ case CTRL_K: /* Ctrl+k, delete from current to end of line. */
+ buf[l.pos] = '\0';
+ l.len = l.pos;
+ refreshLine(&l);
+ break;
+ case CTRL_A: /* Ctrl+a, go to the start of the line */
+ linenoiseEditMoveHome(&l);
+ break;
+ case CTRL_E: /* ctrl+e, go to the end of the line */
+ linenoiseEditMoveEnd(&l);
+ break;
+ case CTRL_L: /* ctrl+l, clear screen */
+ linenoiseClearScreen();
+ refreshLine(&l);
+ break;
+ case CTRL_W: /* ctrl+w, delete previous word */
+ linenoiseEditDeletePrevWord(&l);
+ break;
+ }
+ }
+ return l.len;
+}
+
+/* This special mode is used by linenoise in order to print scan codes
+ * on screen for debugging / development purposes. It is implemented
+ * by the linenoise_example program using the --keycodes option. */
+void linenoisePrintKeyCodes(void) {
+ char quit[4];
+
+ printf("Linenoise key codes debugging mode.\n"
+ "Press keys to see scan codes. Type 'quit' at any time to exit.\n");
+ if (enableRawMode(STDIN_FILENO) == -1) return;
+ memset(quit,' ',4);
+ while(1) {
+ char c;
+ int nread;
+
+ nread = read(STDIN_FILENO,&c,1);
+ if (nread <= 0) continue;
+ memmove(quit,quit+1,sizeof(quit)-1); /* shift string to left. */
+ quit[sizeof(quit)-1] = c; /* Insert current char on the right. */
+ if (memcmp(quit,"quit",sizeof(quit)) == 0) break;
+
+ printf("'%c' %02x (%d) (type quit to exit)\n",
+ isprint(c) ? c : '?', (int)c, (int)c);
+ printf("\r"); /* Go left edge manually, we are in raw mode. */
+ fflush(stdout);
+ }
+ disableRawMode(STDIN_FILENO);
+}
+
+/* This function calls the line editing function linenoiseEdit() using
+ * the STDIN file descriptor set in raw mode. */
+static int linenoiseRaw(char *buf, size_t buflen, const char *prompt) {
+ int count;
+
+ if (buflen == 0) {
+ errno = EINVAL;
+ return -1;
+ }
+ if (!isatty(STDIN_FILENO)) {
+ /* Not a tty: read from file / pipe. */
+ if (fgets(buf, buflen, stdin) == NULL) return -1;
+ count = strlen(buf);
+ if (count && buf[count-1] == '\n') {
+ count--;
+ buf[count] = '\0';
+ }
+ } else {
+ /* Interactive editing. */
+ if (enableRawMode(STDIN_FILENO) == -1) return -1;
+ count = linenoiseEdit(STDIN_FILENO, STDOUT_FILENO, buf, buflen, prompt);
+ disableRawMode(STDIN_FILENO);
+ printf("\n");
+ }
+ return count;
+}
+
+/* The high level function that is the main API of the linenoise library.
+ * This function checks if the terminal has basic capabilities, just checking
+ * for a blacklist of stupid terminals, and later either calls the line
+ * editing function or uses dummy fgets() so that you will be able to type
+ * something even in the most desperate of the conditions. */
+char *linenoise(const char *prompt) {
+ char buf[LINENOISE_MAX_LINE];
+ int count;
+
+ if (isUnsupportedTerm()) {
+ size_t len;
+
+ printf("%s",prompt);
+ fflush(stdout);
+ if (fgets(buf,LINENOISE_MAX_LINE,stdin) == NULL) return NULL;
+ len = strlen(buf);
+ while(len && (buf[len-1] == '\n' || buf[len-1] == '\r')) {
+ len--;
+ buf[len] = '\0';
+ }
+ return strdup(buf);
+ } else {
+ count = linenoiseRaw(buf,LINENOISE_MAX_LINE,prompt);
+ if (count == -1) return NULL;
+ return strdup(buf);
+ }
+}
+
+/* ================================ History ================================= */
+
+/* Free the history, but does not reset it. Only used when we have to
+ * exit() to avoid memory leaks are reported by valgrind & co. */
+static void freeHistory(void) {
+ if (history) {
+ int j;
+
+ for (j = 0; j < history_len; j++)
+ free(history[j]);
+ free(history);
+ }
+}
+
+/* At exit we'll try to fix the terminal to the initial conditions. */
+static void linenoiseAtExit(void) {
+ disableRawMode(STDIN_FILENO);
+ freeHistory();
+}
+
+/* This is the API call to add a new entry in the linenoise history.
+ * It uses a fixed array of char pointers that are shifted (memmoved)
+ * when the history max length is reached in order to remove the older
+ * entry and make room for the new one, so it is not exactly suitable for huge
+ * histories, but will work well for a few hundred of entries.
+ *
+ * Using a circular buffer is smarter, but a bit more complex to handle. */
+int linenoiseHistoryAdd(const char *line) {
+ char *linecopy;
+
+ if (history_max_len == 0) return 0;
+
+ /* Initialization on first call. */
+ if (history == NULL) {
+ history = malloc(sizeof(char*)*history_max_len);
+ if (history == NULL) return 0;
+ memset(history,0,(sizeof(char*)*history_max_len));
+ }
+
+ /* Don't add duplicated lines. */
+ if (history_len && !strcmp(history[history_len-1], line)) return 0;
+
+ /* Add an heap allocated copy of the line in the history.
+ * If we reached the max length, remove the older line. */
+ linecopy = strdup(line);
+ if (!linecopy) return 0;
+ if (history_len == history_max_len) {
+ free(history[0]);
+ memmove(history,history+1,sizeof(char*)*(history_max_len-1));
+ history_len--;
+ }
+ history[history_len] = linecopy;
+ history_len++;
+ return 1;
+}
+
+/* Set the maximum length for the history. This function can be called even
+ * if there is already some history, the function will make sure to retain
+ * just the latest 'len' elements if the new history length value is smaller
+ * than the amount of items already inside the history. */
+int linenoiseHistorySetMaxLen(int len) {
+ char **new;
+
+ if (len < 1) return 0;
+ if (history) {
+ int tocopy = history_len;
+
+ new = malloc(sizeof(char*)*len);
+ if (new == NULL) return 0;
+
+ /* If we can't copy everything, free the elements we'll not use. */
+ if (len < tocopy) {
+ int j;
+
+ for (j = 0; j < tocopy-len; j++) free(history[j]);
+ tocopy = len;
+ }
+ memset(new,0,sizeof(char*)*len);
+ memcpy(new,history+(history_len-tocopy), sizeof(char*)*tocopy);
+ free(history);
+ history = new;
+ }
+ history_max_len = len;
+ if (history_len > history_max_len)
+ history_len = history_max_len;
+ return 1;
+}
+
+/* Save the history in the specified file. On success 0 is returned
+ * otherwise -1 is returned. */
+int linenoiseHistorySave(const char *filename) {
+ FILE *fp = fopen(filename,"w");
+ int j;
+
+ if (fp == NULL) return -1;
+ for (j = 0; j < history_len; j++)
+ fprintf(fp,"%s\n",history[j]);
+ fclose(fp);
+ return 0;
+}
+
+/* Load the history from the specified file. If the file does not exist
+ * zero is returned and no operation is performed.
+ *
+ * If the file exists and the operation succeeded 0 is returned, otherwise
+ * on error -1 is returned. */
+int linenoiseHistoryLoad(const char *filename) {
+ FILE *fp = fopen(filename,"r");
+ char buf[LINENOISE_MAX_LINE];
+
+ if (fp == NULL) return -1;
+
+ while (fgets(buf,LINENOISE_MAX_LINE,fp) != NULL) {
+ char *p;
+
+ p = strchr(buf,'\r');
+ if (!p) p = strchr(buf,'\n');
+ if (p) *p = '\0';
+ linenoiseHistoryAdd(buf);
+ }
+ fclose(fp);
+ return 0;
+}
diff --git a/tools/dev/svnmover/linenoise/linenoise.h b/tools/dev/svnmover/linenoise/linenoise.h
new file mode 100644
index 0000000..0e89179
--- /dev/null
+++ b/tools/dev/svnmover/linenoise/linenoise.h
@@ -0,0 +1,66 @@
+/* linenoise.h -- guerrilla line editing library against the idea that a
+ * line editing lib needs to be 20,000 lines of C code.
+ *
+ * See linenoise.c for more information.
+ *
+ * ------------------------------------------------------------------------
+ *
+ * Copyright (c) 2010-2014, Salvatore Sanfilippo <antirez at gmail dot com>
+ * Copyright (c) 2010-2013, Pieter Noordhuis <pcnoordhuis at gmail dot com>
+ *
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ *
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef __LINENOISE_H
+#define __LINENOISE_H
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+typedef struct linenoiseCompletions {
+ size_t len;
+ char **cvec;
+} linenoiseCompletions;
+
+typedef void(linenoiseCompletionCallback)(const char *, linenoiseCompletions *);
+void linenoiseSetCompletionCallback(linenoiseCompletionCallback *);
+void linenoiseAddCompletion(linenoiseCompletions *, const char *);
+
+char *linenoise(const char *prompt);
+int linenoiseHistoryAdd(const char *line);
+int linenoiseHistorySetMaxLen(int len);
+int linenoiseHistorySave(const char *filename);
+int linenoiseHistoryLoad(const char *filename);
+void linenoiseClearScreen(void);
+void linenoiseSetMultiLine(int ml);
+void linenoisePrintKeyCodes(void);
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* __LINENOISE_H */
diff --git a/tools/dev/svnmover/merge3.c b/tools/dev/svnmover/merge3.c
new file mode 100644
index 0000000..ba32fc5
--- /dev/null
+++ b/tools/dev/svnmover/merge3.c
@@ -0,0 +1,1399 @@
+/*
+ * merge3.c: 3-way merging
+ *
+ * ====================================================================
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ * ====================================================================
+ */
+
+#include <stdio.h>
+#include <string.h>
+#include <assert.h>
+
+#include <apr_lib.h>
+
+#include "svn_hash.h"
+#include "svn_iter.h"
+#include "svn_client.h"
+#include "svn_error.h"
+#include "svn_pools.h"
+#include "svn_props.h"
+#include "svn_string.h"
+#include "svn_dirent_uri.h"
+
+#include "private/svn_subr_private.h"
+#include "private/svn_branch_repos.h"
+#include "private/svn_branch_nested.h"
+#include "private/svn_branch_compat.h"
+#include "private/svn_sorts_private.h"
+#include "private/svn_client_private.h"
+
+#include "svnmover.h"
+
+#include "svn_private_config.h"
+
+
+/* ====================================================================== */
+
+#define is_branch_root_element(branch, eid) \
+ (svn_branch__root_eid(branch) == (eid))
+
+/* Return a string suitable for appending to a displayed element name or
+ * element id to indicate that it is a subbranch root element for SUBBRANCH.
+ * Return "" if SUBBRANCH is null.
+ */
+static const char *
+branch_str(svn_branch__state_t *subbranch,
+ apr_pool_t *result_pool)
+{
+ if (subbranch)
+ return apr_psprintf(result_pool,
+ " (branch %s)",
+ svn_branch__get_id(subbranch, result_pool));
+ return "";
+}
+
+/* Return a string suitable for appending to a displayed element name or
+ * element id to indicate that BRANCH:EID is a subbranch root element.
+ * Return "" if the element is not a subbranch root element.
+ */
+static const char *
+subbranch_str(svn_branch__state_t *branch,
+ int eid,
+ apr_pool_t *result_pool)
+{
+ svn_branch__state_t *subbranch;
+
+ svn_error_clear(svn_branch__get_subbranch_at_eid(branch, &subbranch, eid,
+ result_pool));
+ return branch_str(subbranch, result_pool);
+}
+
+/* Return the longest known relative path leading to element EID in ELEMENTS.
+ *
+ * Set *BASE_EID_P to -1 if this path is rooted at the branch root;
+ * otherwise, set *BASE_EID_P to the EID from which the path is relative,
+ * In the latter case, element *BASE_EID_P is not found in ELEMENTS.
+ *
+ * If CYCLE_CONFLICTS is non-null, it maps each EID involved in a cycle to
+ * [something]. If null, assume there are no cycles.
+ *
+ * If there is a cycle, set *BASE_EID_P to the EID of the nearest element
+ * that is part of a cycle and return the path relative to that element.
+ */
+static const char *
+partial_relpath(int *base_eid_p,
+ svn_element__tree_t *elements,
+ apr_hash_t *cycle_conflicts,
+ int eid,
+ apr_pool_t *result_pool)
+{
+ const char *s = "";
+ int this_eid = eid;
+ svn_element__content_t *e;
+
+ while ((e = svn_element__tree_get(elements, this_eid))
+ && (e->parent_eid != -1))
+ {
+ s = svn_relpath_join(e->name, s, result_pool);
+
+ this_eid = e->parent_eid;
+
+ /* Detect cycles */
+ if (cycle_conflicts && svn_eid__hash_get(cycle_conflicts, this_eid))
+ {
+ /* Cycle detected */
+ e = NULL;
+ break;
+ }
+ }
+
+ if (base_eid_p)
+ {
+ if (e)
+ {
+ /* We reached the root element */
+ *base_eid_p = -1;
+ }
+ else
+ {
+ /* We came to this nonexistent or cyclic parent element */
+ *base_eid_p = this_eid;
+ }
+ }
+ return s;
+}
+
+/* */
+static svn_error_t *
+display_relpath(const char **s_p,
+ svn_branch__state_t *branch,
+ apr_hash_t *cycle_conflicts,
+ int eid,
+ apr_pool_t *result_pool)
+{
+ svn_element__tree_t *elements;
+ int base_eid;
+ const char *s;
+
+ SVN_ERR(svn_branch__state_get_elements(branch, &elements, result_pool));
+ s = partial_relpath(&base_eid, elements, cycle_conflicts, eid, result_pool);
+
+ if (base_eid == -1)
+ s = apr_psprintf(result_pool, "/%s", s);
+ else if (base_eid == eid)
+ s = "<nil>";
+ else if (the_ui_mode == UI_MODE_PATHS)
+ s = svn_relpath_join("...", s, result_pool);
+ else
+ {
+ const char *eid_str = apr_psprintf(result_pool, "<e%d>", base_eid);
+ s = svn_relpath_join(eid_str, s, result_pool);
+ }
+ *s_p = s;
+ return SVN_NO_ERROR;
+}
+
+/* Set *S_P to a string describing the identity of element EID. */
+static svn_error_t *
+merged_element_id_str(const char **s_p,
+ conflict_storage_t *conflict_storage,
+ int eid,
+ apr_pool_t *result_pool)
+{
+ const char *s_yca, *s_src, *s_tgt, *s_merged;
+
+ SVN_ERR(display_relpath(&s_yca, conflict_storage->yca_branch, NULL,
+ eid, result_pool));
+ SVN_ERR(display_relpath(&s_src, conflict_storage->src_branch, NULL,
+ eid, result_pool));
+ SVN_ERR(display_relpath(&s_tgt, conflict_storage->tgt_branch, conflict_storage->cycle_conflicts,
+ eid, result_pool));
+ SVN_ERR(display_relpath(&s_merged, conflict_storage->merged_branch,
+ conflict_storage->cycle_conflicts,
+ eid, result_pool));
+ *s_p = apr_psprintf(result_pool,
+ "yca=%s, side1=%s, side2=%s, merged=%s",
+ s_yca, s_src, s_tgt, s_merged);
+ if (the_ui_mode == UI_MODE_EIDS)
+ {
+ *s_p = apr_psprintf(result_pool,
+ "e%d (%s)", eid, *s_p);
+ }
+ return SVN_NO_ERROR;
+}
+
+/* Options to control how strict the merge is about detecting conflicts.
+ *
+ * The options affect cases that, depending on the user's preference, could
+ * either be considered a conflict or be merged to a deterministic result.
+ *
+ * The set of options is flexible and may be extended in future.
+ */
+typedef struct merge_conflict_policy_t
+{
+ /* Whether to merge delete-vs-delete */
+ svn_boolean_t merge_double_delete;
+ /* Whether to merge add-vs-add (with same parent/name/payload) */
+ svn_boolean_t merge_double_add;
+ /* Whether to merge reparent-vs-reparent (with same parent) */
+ svn_boolean_t merge_double_reparent;
+ /* Whether to merge rename-vs-rename (with same name) */
+ svn_boolean_t merge_double_rename;
+ /* Whether to merge modify-vs-modify (with same payload) */
+ svn_boolean_t merge_double_modify;
+ /* Possible additional controls: */
+ /* merge (parent, name, props, text) independently or as a group */
+ /* merge (parent, name) independently or as a group */
+ /* merge (props, text) independently or as a group */
+} merge_conflict_policy_t;
+
+/* An element-merge conflict description.
+ */
+typedef struct element_merge3_conflict_t
+{
+ svn_element__content_t *yca;
+ svn_element__content_t *side1;
+ svn_element__content_t *side2;
+} element_merge3_conflict_t;
+
+static element_merge3_conflict_t *
+element_merge3_conflict_create(svn_element__content_t *yca,
+ svn_element__content_t *side1,
+ svn_element__content_t *side2,
+ apr_pool_t *result_pool)
+{
+ element_merge3_conflict_t *c = apr_pcalloc(result_pool, sizeof(*c));
+
+ c->yca = yca ? svn_element__content_dup(yca, result_pool) : NULL;
+ c->side1 = side1 ? svn_element__content_dup(side1, result_pool) : NULL;
+ c->side2 = side2 ? svn_element__content_dup(side2, result_pool) : NULL;
+ return c;
+}
+
+static element_merge3_conflict_t *
+element_merge3_conflict_dup(element_merge3_conflict_t *old_conflict,
+ apr_pool_t *result_pool)
+{
+ return element_merge3_conflict_create(old_conflict->yca,
+ old_conflict->side1,
+ old_conflict->side2, result_pool);
+}
+
+static svn_error_t *
+element_merge3_conflict_str(const char **s_p,
+ conflict_storage_t *conflict_storage,
+ element_merge3_conflict_t *c,
+ int eid,
+ apr_pool_t *result_pool)
+{
+ const char *id_str;
+
+ SVN_ERR(merged_element_id_str(&id_str, conflict_storage,
+ eid, result_pool));
+ *s_p = apr_psprintf(result_pool,
+ "element-merge conflict:\n"
+ " %s",
+ id_str);
+ return SVN_NO_ERROR;
+}
+
+/* A name-clash conflict description.
+ *
+ * Describes two or more elements competing for the same (parent, name)
+ * slot in the merged tree.
+ */
+typedef struct name_clash_conflict_t
+{
+  int parent_eid;
+  const char *name;
+  /* All EIDs that conflict with each other: hash of (eid -> irrelevant). */
+  apr_hash_t *elements;
+} name_clash_conflict_t;
+
+/* Create an empty name-clash conflict for the name NAME within the parent
+ * element PARENT_EID, allocated in RESULT_POOL.  The caller populates
+ * C->elements with the clashing EIDs.
+ */
+static name_clash_conflict_t *
+name_clash_conflict_create(int parent_eid,
+                           const char *name,
+                           apr_pool_t *result_pool)
+{
+  name_clash_conflict_t *c = apr_pcalloc(result_pool, sizeof(*c));
+
+  c->parent_eid = parent_eid;
+  c->name = apr_pstrdup(result_pool, name);
+  c->elements = apr_hash_make(result_pool);
+  return c;
+}
+
+/* Set *S_P to a human-readable description of the name-clash conflict C,
+ * listing every clashing element (in hash order), allocated in
+ * RESULT_POOL.  CONFLICT_STORAGE supplies the branch context used to
+ * render each element id.
+ */
+static svn_error_t *
+name_clash_conflict_str(const char **s_p,
+                        conflict_storage_t *conflict_storage,
+                        name_clash_conflict_t *c,
+                        apr_pool_t *result_pool)
+{
+  apr_hash_index_t *hi2;
+  const char *s = "name-clash conflict: elements";
+
+  for (hi2 = apr_hash_first(result_pool, c->elements);
+       hi2; hi2 = apr_hash_next(hi2))
+    {
+      int eid = svn_eid__hash_this_key(hi2);
+      const char *id_str;
+
+      SVN_ERR(merged_element_id_str(&id_str, conflict_storage,
+                                    eid, result_pool));
+      /* Append each element's id on its own indented line. */
+      s = apr_psprintf(result_pool,
+                       "%s\n"
+                       "  %s",
+                       s, id_str);
+    }
+  *s_p = s;
+  return SVN_NO_ERROR;
+}
+
+/* A cycle conflict description.
+ *
+ * Describes a set of elements whose parent links form a loop, so they
+ * are not reachable from the branch root.
+ */
+typedef struct cycle_conflict_t
+{
+  /* All EIDs that conflict with each other: hash of (eid -> irrelevant). */
+  apr_hash_t *elements;
+} cycle_conflict_t;
+
+/* Create an empty cycle conflict, allocated in RESULT_POOL.  The caller
+ * populates C->elements with the cycle's members (see record_cycle()).
+ */
+static cycle_conflict_t *
+cycle_conflict_create(apr_pool_t *result_pool)
+{
+  cycle_conflict_t *c = apr_pcalloc(result_pool, sizeof(*c));
+
+  c->elements = apr_hash_make(result_pool);
+  return c;
+}
+
+/* Set *S_P to a human-readable description of the cycle conflict C,
+ * starting at element EID, allocated in RESULT_POOL.
+ *
+ * Walks the parent chain from EID until it arrives back at EID, listing
+ * each member.  Assumes C->elements contains every member of the cycle,
+ * keyed by eid (as record_cycle() guarantees); a missing entry would
+ * dereference null.
+ */
+static svn_error_t *
+cycle_conflict_str(const char **s_p,
+                   conflict_storage_t *conflict_storage,
+                   cycle_conflict_t *c,
+                   int eid,
+                   apr_pool_t *result_pool)
+{
+  svn_element__content_t *element = svn_eid__hash_get(c->elements, eid);
+  const char *s
+    = apr_psprintf(result_pool, "element '%s' has cyclic parentage",
+                   element->name);
+  int this_eid = eid;
+
+  do
+    {
+      const char *id_str;
+
+      SVN_ERR(merged_element_id_str(&id_str, conflict_storage,
+                                    this_eid, result_pool));
+      s = apr_psprintf(result_pool,
+                       "%s\n"
+                       "  %s",
+                       s, id_str);
+      /* Step to the parent; the loop ends when we return to EID. */
+      element = svn_eid__hash_get(c->elements, this_eid);
+      this_eid = element->parent_eid;
+    }
+  while (this_eid != eid);
+
+  *s_p = s;
+  return SVN_NO_ERROR;
+}
+
+/* An orphan conflict description.
+ *
+ * Describes an element whose parent element does not exist in the
+ * merged branch.
+ */
+typedef struct orphan_conflict_t
+{
+  svn_element__content_t *element;
+} orphan_conflict_t;
+
+/* Create an orphan conflict recording ELEMENT (the orphaned child),
+ * deep-copied into RESULT_POOL.
+ */
+static orphan_conflict_t *
+orphan_conflict_create(svn_element__content_t *element,
+                       apr_pool_t *result_pool)
+{
+  orphan_conflict_t *c = apr_pcalloc(result_pool, sizeof(*c));
+
+  c->element = svn_element__content_dup(element, result_pool);
+  return c;
+}
+
+/* Set *S_P to a human-readable description of the orphan conflict C
+ * affecting element EID, allocated in RESULT_POOL.  Shows both the
+ * missing parent's id and the orphaned child's id.
+ */
+static svn_error_t *
+orphan_conflict_str(const char **s_p,
+                    conflict_storage_t *conflict_storage,
+                    orphan_conflict_t *c,
+                    int eid,
+                    apr_pool_t *result_pool)
+{
+  const char *id_str;
+  const char *parent_id_str;
+
+  SVN_ERR(merged_element_id_str(&id_str, conflict_storage,
+                                eid, result_pool));
+  SVN_ERR(merged_element_id_str(&parent_id_str, conflict_storage,
+                                c->element->parent_eid, result_pool));
+  *s_p = apr_psprintf(result_pool,
+                      "orphan conflict: parent (%s) does not exist "
+                      "for the following child:\n"
+                      "  %s",
+                      parent_id_str, id_str);
+  return SVN_NO_ERROR;
+}
+
+/* Create an empty conflict-storage object, allocated in RESULT_POOL.
+ * All fields start out null/zero; the caller fills them in.
+ */
+static conflict_storage_t *
+conflict_storage_create(apr_pool_t *result_pool)
+{
+  conflict_storage_t *c = apr_pcalloc(result_pool, sizeof(*c));
+
+  return c;
+}
+
+/* Print a human-readable listing of every stored conflict, followed by a
+ * per-kind summary count.  In EIDs UI mode, each line is prefixed with
+ * the conflict's id (the key a user passes to "resolved").
+ */
+svn_error_t *
+svnmover_display_conflicts(conflict_storage_t *conflict_storage,
+                           apr_pool_t *scratch_pool)
+{
+  apr_hash_index_t *hi;
+
+  svnmover_notify(_("Conflicts:"));
+
+  /* Element-merge conflicts: keyed by eid. */
+  for (hi = apr_hash_first(scratch_pool,
+                           conflict_storage->element_merge_conflicts);
+       hi; hi = apr_hash_next(hi))
+    {
+      int eid = svn_eid__hash_this_key(hi);
+      element_merge3_conflict_t *c = apr_hash_this_val(hi);
+      const char *id_string = apr_psprintf(scratch_pool, "e%d", eid);
+      const char *c_str;
+
+      SVN_ERR(element_merge3_conflict_str(&c_str, conflict_storage,
+                                          c, eid, scratch_pool));
+      if (the_ui_mode == UI_MODE_EIDS)
+        {
+          svnmover_notify("  %s: %s", id_string, c_str);
+        }
+      else
+        {
+          svnmover_notify("  %s", c_str);
+        }
+    }
+  /* Name-clash conflicts: keyed by a "parent_eid/name" string. */
+  for (hi = apr_hash_first(scratch_pool,
+                           conflict_storage->name_clash_conflicts);
+       hi; hi = apr_hash_next(hi))
+    {
+      const char *id_string = apr_hash_this_key(hi);
+      name_clash_conflict_t *c = apr_hash_this_val(hi);
+      const char *c_str;
+
+      SVN_ERR(name_clash_conflict_str(&c_str, conflict_storage,
+                                      c, scratch_pool));
+      if (the_ui_mode == UI_MODE_EIDS)
+        {
+          svnmover_notify("  %s: %s", id_string, c_str);
+        }
+      else
+        {
+          svnmover_notify("  %s", c_str);
+        }
+    }
+  /* Cycle conflicts: keyed by eid. */
+  for (hi = apr_hash_first(scratch_pool,
+                           conflict_storage->cycle_conflicts);
+       hi; hi = apr_hash_next(hi))
+    {
+      int eid = svn_eid__hash_this_key(hi);
+      cycle_conflict_t *c = apr_hash_this_val(hi);
+      const char *id_string = apr_psprintf(scratch_pool, "e%d", eid);
+      const char *c_str;
+
+      SVN_ERR(cycle_conflict_str(&c_str, conflict_storage,
+                                 c, eid, scratch_pool));
+      if (the_ui_mode == UI_MODE_EIDS)
+        {
+          svnmover_notify("  %s: %s", id_string, c_str);
+        }
+      else
+        {
+          svnmover_notify("  %s", c_str);
+        }
+    }
+  /* Orphan conflicts: keyed by eid. */
+  for (hi = apr_hash_first(scratch_pool,
+                           conflict_storage->orphan_conflicts);
+       hi; hi = apr_hash_next(hi))
+    {
+      int eid = svn_eid__hash_this_key(hi);
+      orphan_conflict_t *c = apr_hash_this_val(hi);
+      const char *id_string = apr_psprintf(scratch_pool, "e%d", eid);
+      const char *c_str;
+
+      SVN_ERR(orphan_conflict_str(&c_str, conflict_storage,
+                                  c, eid, scratch_pool));
+      if (the_ui_mode == UI_MODE_EIDS)
+        {
+          svnmover_notify("  %s: %s", id_string, c_str);
+        }
+      else
+        {
+          svnmover_notify("  %s", c_str);
+        }
+    }
+
+  svnmover_notify(_("Summary of conflicts:\n"
+                    "  %d element-merge conflicts\n"
+                    "  %d name-clash conflicts\n"
+                    "  %d cycle conflicts\n"
+                    "  %d orphan conflicts\n"),
+                  apr_hash_count(conflict_storage->element_merge_conflicts),
+                  apr_hash_count(conflict_storage->name_clash_conflicts),
+                  apr_hash_count(conflict_storage->cycle_conflicts),
+                  apr_hash_count(conflict_storage->orphan_conflicts));
+  return SVN_NO_ERROR;
+}
+
+/* The four kinds of conflict tracked in conflict_storage_t. */
+enum conflict_kind_t { conflict_kind_element_merge,
+                       conflict_kind_clash,
+                       conflict_kind_cycle,
+                       conflict_kind_orphan };
+
+/* A reference to one conflict: its kind, the hash it lives in, and its
+ * key within that hash (an int eid, or a string for name clashes).
+ */
+typedef struct conflict_object_t
+{
+  enum conflict_kind_t conflict_kind;
+  apr_hash_t *conflicts;
+  const void *key;
+} conflict_object_t;
+
+/* Create a conflict reference of kind CONFLICT_KIND pointing into hash
+ * CONFLICTS at KEY, allocated in RESULT_POOL.  KEY is copied: as a
+ * string for name clashes, otherwise as a sizeof(int) eid blob.
+ */
+static conflict_object_t *
+conflict_object_create(enum conflict_kind_t conflict_kind,
+                       apr_hash_t *conflicts,
+                       const void *key,
+                       apr_pool_t *result_pool)
+{
+  conflict_object_t *c = apr_pcalloc(result_pool, sizeof(*c));
+
+  c->conflict_kind = conflict_kind;
+  c->conflicts = conflicts;
+  c->key = (conflict_kind == conflict_kind_clash)
+             ? apr_pstrdup(result_pool, key)
+             : apr_pmemdup(result_pool, key, sizeof(int));
+  return c;
+}
+
+/* Set *CONFLICT_P to a reference to the conflict identified by
+ * ID_STRING in CONFLICTS, or to null if none matches; allocate the
+ * result in RESULT_POOL.
+ *
+ * An id of the form "eNNN" selects an eid-keyed conflict; any other
+ * string selects a name-clash conflict by its hash key.  If the same
+ * eid appears in more than one conflict hash, later checks overwrite
+ * earlier ones, so the precedence is orphan > cycle > element-merge.
+ *
+ * NOTE(review): SCRATCH_POOL is currently unused.
+ */
+static svn_error_t *
+find_conflict(conflict_object_t **conflict_p,
+              conflict_storage_t *conflicts,
+              const char *id_string,
+              apr_pool_t *result_pool,
+              apr_pool_t *scratch_pool)
+{
+  *conflict_p = NULL;
+
+  if (id_string[0] == 'e')
+    {
+      int which_eid = atoi(id_string + 1);
+
+      if (svn_eid__hash_get(conflicts->element_merge_conflicts, which_eid))
+        {
+          *conflict_p
+            = conflict_object_create(conflict_kind_element_merge,
+                                     conflicts->element_merge_conflicts,
+                                     &which_eid, result_pool);
+        }
+      if (svn_eid__hash_get(conflicts->cycle_conflicts, which_eid))
+        {
+          *conflict_p
+            = conflict_object_create(conflict_kind_cycle,
+                                     conflicts->cycle_conflicts,
+                                     &which_eid, result_pool);
+        }
+      if (svn_eid__hash_get(conflicts->orphan_conflicts, which_eid))
+        {
+          *conflict_p
+            = conflict_object_create(conflict_kind_orphan,
+                                     conflicts->orphan_conflicts,
+                                     &which_eid, result_pool);
+        }
+    }
+  else
+    {
+      if (svn_hash_gets(conflicts->name_clash_conflicts, id_string))
+        {
+          *conflict_p
+            = conflict_object_create(conflict_kind_clash,
+                                     conflicts->name_clash_conflicts,
+                                     id_string, result_pool);
+        }
+    }
+
+  return SVN_NO_ERROR;
+}
+
+/* Mark the conflict identified by ID_STRING as resolved, by deleting it
+ * from the hash in CONFLICTS that holds it.  Return an error if no such
+ * conflict exists.
+ */
+svn_error_t *
+svnmover_conflict_resolved(conflict_storage_t *conflicts,
+                           const char *id_string,
+                           apr_pool_t *scratch_pool)
+{
+  conflict_object_t *conflict;
+
+  SVN_ERR(find_conflict(&conflict, conflicts, id_string,
+                        scratch_pool, scratch_pool));
+  if (! conflict)
+    {
+      return svn_error_createf(SVN_BRANCH__ERR, NULL,
+                               _("Conflict '%s' not found"), id_string);
+    }
+
+  if (conflict->conflict_kind == conflict_kind_clash)
+    {
+      /* Name clashes are keyed by NUL-terminated string. */
+      svn_hash_sets(conflict->conflicts, conflict->key, NULL);
+    }
+  else
+    {
+      /* Eid-keyed kinds: NOTE(review): assumes the svn_eid__hash_*
+         wrappers store keys as sizeof(int) blobs, so this plain
+         apr_hash_set() removal matches — confirm against svn_eid__hash. */
+      apr_hash_set(conflict->conflicts, conflict->key, sizeof (int), NULL);
+    }
+  svnmover_notify("Marked conflict '%s' as resolved", id_string);
+  return SVN_NO_ERROR;
+}
+
+/* Return TRUE iff CONFLICTS is non-null and holds at least one conflict
+ * of any kind.
+ */
+svn_boolean_t
+svnmover_any_conflicts(const conflict_storage_t *conflicts)
+{
+  return conflicts
+    && (apr_hash_count(conflicts->element_merge_conflicts)
+        || apr_hash_count(conflicts->name_clash_conflicts)
+        || apr_hash_count(conflicts->cycle_conflicts)
+        || apr_hash_count(conflicts->orphan_conflicts));
+}
+
+/* Merge the payload for one element.
+ *
+ * If there is no conflict, set *CONFLICT_P to FALSE and *RESULT_P to the
+ * merged element; otherwise set *CONFLICT_P to TRUE and *RESULT_P to NULL.
+ * Note that *RESULT_P can be null, indicating a deletion.
+ *
+ * This handles any case where at least one of (SIDE1, SIDE2, YCA) exists.
+ * (As visible below, only the all-three-exist case can produce a non-null
+ * *RESULT_P; element_merge() calls this only in that case.)
+ *
+ * Allocate the result in RESULT_POOL and/or as pointers to the inputs.
+ */
+static void
+payload_merge(svn_element__payload_t **result_p,
+              svn_boolean_t *conflict_p,
+              int eid,
+              svn_element__payload_t *side1,
+              svn_element__payload_t *side2,
+              svn_element__payload_t *yca,
+              const merge_conflict_policy_t *policy,
+              apr_pool_t *result_pool,
+              apr_pool_t *scratch_pool)
+{
+  svn_boolean_t conflict = FALSE;
+  svn_element__payload_t *result = NULL;
+
+  if (yca && side1 && side2)
+    {
+      /* Changed on one side only: take the other side's value. */
+      if (svn_element__payload_equal(side1, yca, scratch_pool))
+        {
+          result = side2;
+        }
+      else if (svn_element__payload_equal(side2, yca, scratch_pool))
+        {
+          result = side1;
+        }
+      /* Both sides made the same change: accept it, if policy allows. */
+      else if (policy->merge_double_modify
+               && svn_element__payload_equal(side1, side2, scratch_pool))
+        {
+          /*SVN_DBG(("e%d double modify: ... -> { ... | ... }",
+                   eid));*/
+          result = side1;
+        }
+      else
+        {
+          /* ### Need not conflict if can merge props and text separately. */
+
+          /*SVN_DBG(("e%d conflict: payload: ... -> { ... | ... }",
+                   eid));*/
+          conflict = TRUE;
+        }
+    }
+
+  *result_p = result;
+  *conflict_p = conflict;
+}
+
+/* Merge the content for one element.
+ *
+ * If there is no conflict, set *CONFLICT_P to FALSE and *RESULT_P to the
+ * merged element; otherwise set *CONFLICT_P to TRUE and *RESULT_P to NULL.
+ * Note that *RESULT_P can be null, indicating a deletion.
+ *
+ * This handles any case where at least one of (SIDE1, SIDE2, YCA) exists.
+ *
+ * The parent-eid, name and payload components are merged independently;
+ * a conflict in any one of them makes the whole element conflicted.
+ *
+ * Allocate the result in RESULT_POOL and/or as pointers to the inputs.
+ */
+static void
+element_merge(svn_element__content_t **result_p,
+              element_merge3_conflict_t **conflict_p,
+              int eid,
+              svn_element__content_t *side1,
+              svn_element__content_t *side2,
+              svn_element__content_t *yca,
+              const merge_conflict_policy_t *policy,
+              apr_pool_t *result_pool,
+              apr_pool_t *scratch_pool)
+{
+  svn_boolean_t same1 = svn_element__content_equal(yca, side1, scratch_pool);
+  svn_boolean_t same2 = svn_element__content_equal(yca, side2, scratch_pool);
+  svn_boolean_t conflict = FALSE;
+  svn_element__content_t *result = NULL;
+
+  /* Unchanged on one side (including both-null): take the other side. */
+  if (same1)
+    {
+      result = side2;
+    }
+  else if (same2)
+    {
+      result = side1;
+    }
+  else if (yca && side1 && side2)
+    {
+      /* All three sides are different, and all exist */
+      /* Start from a shallow copy of YCA and merge each component. */
+      result = apr_pmemdup(result_pool, yca, sizeof(*result));
+
+      /* merge the parent-eid */
+      if (side1->parent_eid == yca->parent_eid)
+        {
+          result->parent_eid = side2->parent_eid;
+        }
+      else if (side2->parent_eid == yca->parent_eid)
+        {
+          result->parent_eid = side1->parent_eid;
+        }
+      else if (policy->merge_double_reparent
+               && side1->parent_eid == side2->parent_eid)
+        {
+          /*SVN_DBG(("e%d double reparent: e%d -> { e%d | e%d }",
+                   eid, yca->parent_eid, side1->parent_eid, side2->parent_eid));*/
+          result->parent_eid = side1->parent_eid;
+        }
+      else
+        {
+          /*SVN_DBG(("e%d conflict: parent: e%d -> { e%d | e%d }",
+                   eid, yca->parent_eid, side1->parent_eid, side2->parent_eid));*/
+          conflict = TRUE;
+        }
+
+      /* merge the name */
+      if (strcmp(side1->name, yca->name) == 0)
+        {
+          result->name = side2->name;
+        }
+      else if (strcmp(side2->name, yca->name) == 0)
+        {
+          result->name = side1->name;
+        }
+      else if (policy->merge_double_rename
+               && strcmp(side1->name, side2->name) == 0)
+        {
+          /*SVN_DBG(("e%d double rename: %s -> { %s | %s }",
+                   eid, yca->name, side1->name, side2->name));*/
+          result->name = side1->name;
+        }
+      else
+        {
+          /*SVN_DBG(("e%d conflict: name: %s -> { %s | %s }",
+                   eid, yca->name, side1->name, side2->name));*/
+          conflict = TRUE;
+        }
+
+      /* merge the payload */
+      {
+        svn_boolean_t payload_conflict;
+
+        payload_merge(&result->payload, &payload_conflict,
+                      eid, side1->payload, side2->payload, yca->payload,
+                      policy, result_pool, scratch_pool);
+        if (payload_conflict)
+          conflict = TRUE;
+      }
+    }
+  else if (! side1 && ! side2)
+    {
+      /* Double delete (as we assume at least one of YCA/SIDE1/SIDE2 exists) */
+      if (policy->merge_double_delete)
+        {
+          /*SVN_DBG(("e%d double delete",
+                   eid));*/
+          result = side1;
+        }
+      else
+        {
+          /*SVN_DBG(("e%d conflict: delete vs. delete",
+                   eid));*/
+          conflict = TRUE;
+        }
+    }
+  else if (side1 && side2)
+    {
+      /* Double add (as we already handled the case where YCA also exists) */
+      /* May be allowed for equal content of a normal element (not subbranch) */
+      if (policy->merge_double_add
+          && !side1->payload->is_subbranch_root
+          && !side2->payload->is_subbranch_root
+          && svn_element__content_equal(side1, side2, scratch_pool))
+        {
+          /*SVN_DBG(("e%d double add",
+                   eid));*/
+          result = side1;
+        }
+      else
+        {
+          /*SVN_DBG(("e%d conflict: add vs. add (%s)",
+                   eid,
+                   svn_element_content_equal(side1, side2, scratch_pool)
+                     ? "same content" : "different content"));*/
+          conflict = TRUE;
+        }
+    }
+  else
+    {
+      /* The remaining cases must be delete vs. modify */
+      /*SVN_DBG(("e%d conflict: delete vs. modify: %d -> { %d | %d }",
+               eid, !!yca, !!side1, !!side2));*/
+      conflict = TRUE;
+    }
+
+  *result_p = result;
+  /* On conflict, report the full three-way description and no result. */
+  *conflict_p
+    = conflict ? element_merge3_conflict_create(yca, side1, side2,
+                                                result_pool) : NULL;
+}
+
+/* Forward declaration: branch_merge_subtree_r() and merge_subbranch()
+ * are mutually recursive (a subtree merge recurses into subbranches).
+ */
+static svn_error_t *
+branch_merge_subtree_r(svn_branch__txn_t *edit_txn,
+                       svn_branch__state_t *edit_branch,
+                       conflict_storage_t **conflict_storage_p,
+                       const svn_branch__el_rev_id_t *src,
+                       const svn_branch__el_rev_id_t *tgt,
+                       const svn_branch__el_rev_id_t *yca,
+                       apr_pool_t *result_pool,
+                       apr_pool_t *scratch_pool);
+
+/* Merge the subbranch of {SRC, TGT, YCA} found at EID.
+ *
+ * Cases, by which of the three sides have a subbranch at EID:
+ *   all three        -> open an edit subbranch and merge recursively;
+ *   SRC only         -> branch it into the edit txn (added in source);
+ *   TGT only         -> branch it into the edit txn (added in target);
+ *   other mixtures   -> potential conflicts, currently not raised (###).
+ */
+static svn_error_t *
+merge_subbranch(svn_branch__txn_t *edit_txn,
+                svn_branch__state_t *edit_branch,
+                const svn_branch__el_rev_id_t *src,
+                const svn_branch__el_rev_id_t *tgt,
+                const svn_branch__el_rev_id_t *yca,
+                int eid,
+                apr_pool_t *scratch_pool)
+{
+  svn_branch__state_t *src_subbranch;
+  svn_branch__state_t *tgt_subbranch;
+  svn_branch__state_t *yca_subbranch;
+  svn_branch__el_rev_id_t *subbr_src = NULL;
+  svn_branch__el_rev_id_t *subbr_tgt = NULL;
+  svn_branch__el_rev_id_t *subbr_yca = NULL;
+
+  /* Look up the subbranch rooted at EID on each of the three sides;
+     each lookup may yield null. */
+  SVN_ERR(svn_branch__get_subbranch_at_eid(src->branch, &src_subbranch,
+                                           eid, scratch_pool));
+  SVN_ERR(svn_branch__get_subbranch_at_eid(tgt->branch, &tgt_subbranch,
+                                           eid, scratch_pool));
+  SVN_ERR(svn_branch__get_subbranch_at_eid(yca->branch, &yca_subbranch,
+                                           eid, scratch_pool));
+  if (src_subbranch)
+    subbr_src = svn_branch__el_rev_id_create(
+                  src_subbranch, svn_branch__root_eid(src_subbranch),
+                  src->rev, scratch_pool);
+  if (tgt_subbranch)
+    subbr_tgt = svn_branch__el_rev_id_create(
+                  tgt_subbranch, svn_branch__root_eid(tgt_subbranch),
+                  tgt->rev, scratch_pool);
+  if (yca_subbranch)
+    subbr_yca = svn_branch__el_rev_id_create(
+                  yca_subbranch, svn_branch__root_eid(yca_subbranch),
+                  yca->rev, scratch_pool);
+
+  if (subbr_src && subbr_tgt && subbr_yca)  /* ?edit vs. ?edit */
+    {
+      conflict_storage_t *conflict_storage;
+      const char *new_branch_id
+        = svn_branch__id_nest(svn_branch__get_id(edit_branch, scratch_pool),
+                              eid, scratch_pool);
+      svn_branch__rev_bid_eid_t *from
+        = svn_branch__rev_bid_eid_create(tgt_subbranch->txn->rev,
+                                         svn_branch__get_id(tgt_subbranch,
+                                                            scratch_pool),
+                                         svn_branch__root_eid(tgt_subbranch),
+                                         scratch_pool);
+      svn_branch__state_t *edit_subbranch;
+
+      SVN_ERR(svn_branch__txn_open_branch(edit_txn, &edit_subbranch,
+                                          new_branch_id, from->eid, from,
+                                          scratch_pool, scratch_pool));
+
+      /* subbranch possibly changed in source => merge */
+      SVN_ERR(branch_merge_subtree_r(edit_txn, edit_subbranch,
+                                     &conflict_storage,
+                                     subbr_src, subbr_tgt, subbr_yca,
+                                     scratch_pool, scratch_pool));
+      /* ### store this branch's conflict_storage somewhere ... */
+    }
+  else if (subbr_src && subbr_yca)  /* ?edit vs. delete */
+    {
+      /* ### possible conflict (edit vs. delete) */
+    }
+  else if (subbr_tgt && subbr_yca)  /* delete vs. ?edit */
+    {
+      /* ### possible conflict (delete vs. edit) */
+    }
+  else if (subbr_src && subbr_tgt)  /* double add */
+    {
+      /* ### conflict */
+    }
+  else if (subbr_src)  /* added on source branch */
+    {
+      const char *new_branch_id
+        = svn_branch__id_nest(svn_branch__get_id(edit_branch, scratch_pool),
+                              eid, scratch_pool);
+      svn_branch__rev_bid_eid_t *from
+        = svn_branch__rev_bid_eid_create(src_subbranch->txn->rev,
+                                         svn_branch__get_id(src_subbranch,
+                                                            scratch_pool),
+                                         svn_branch__root_eid(src_subbranch),
+                                         scratch_pool);
+
+      SVN_ERR(svn_branch__txn_open_branch(edit_txn, NULL /*new_branch_p*/,
+                                          new_branch_id, from->eid, from,
+                                          scratch_pool, scratch_pool));
+    }
+  else if (subbr_tgt)  /* added on target branch */
+    {
+      const char *new_branch_id
+        = svn_branch__id_nest(svn_branch__get_id(edit_branch, scratch_pool),
+                              eid, scratch_pool);
+      svn_branch__rev_bid_eid_t *from
+        = svn_branch__rev_bid_eid_create(tgt_subbranch->txn->rev,
+                                         svn_branch__get_id(tgt_subbranch,
+                                                            scratch_pool),
+                                         svn_branch__root_eid(tgt_subbranch),
+                                         scratch_pool);
+
+      SVN_ERR(svn_branch__txn_open_branch(edit_txn, NULL /*new_branch_p*/,
+                                          new_branch_id, from->eid, from,
+                                          scratch_pool, scratch_pool));
+    }
+  else if (subbr_yca)  /* double delete */
+    {
+      /* ### conflict? policy option? */
+    }
+
+  return SVN_NO_ERROR;
+}
+
+/* Hash-sort comparator: order elements by parent eid, then by name.
+ * Used so that elements clashing on (parent, name) become adjacent.
+ */
+static int
+sort_compare_items_by_peid_and_name(const svn_sort__item_t *a,
+                                    const svn_sort__item_t *b)
+{
+  svn_element__content_t *element_a = a->value;
+  svn_element__content_t *element_b = b->value;
+
+  if (element_a->parent_eid != element_b->parent_eid)
+    return element_a->parent_eid - element_b->parent_eid;
+  return strcmp(element_a->name, element_b->name);
+}
+
+/* Return all (key -> name_clash_conflict_t) name clash conflicts in BRANCH.
+ *
+ * Iterates the elements sorted by (parent eid, name) so any elements
+ * sharing a (parent, name) slot appear consecutively; each clashing pair
+ * is folded into a per-slot conflict keyed by "parent_eid/name".
+ */
+static svn_error_t *
+detect_clashes(apr_hash_t **clashes_p,
+               svn_branch__state_t *branch,
+               apr_pool_t *result_pool,
+               apr_pool_t *scratch_pool)
+{
+  apr_hash_t *clashes = apr_hash_make(result_pool);
+  svn_element__tree_t *elements;
+  svn_eid__hash_iter_t *ei;
+  int prev_eid = -1;
+  svn_element__content_t *prev_element = NULL;
+
+  SVN_ERR(svn_branch__state_get_elements(branch, &elements, scratch_pool));
+  for (SVN_EID__HASH_ITER_SORTED(ei, elements->e_map,
+                                 sort_compare_items_by_peid_and_name,
+                                 scratch_pool))
+    {
+      int eid = ei->eid;
+      svn_element__content_t *element = ei->val;
+
+      if (prev_element
+          && element->parent_eid == prev_element->parent_eid
+          && strcmp(element->name, prev_element->name) == 0)
+        {
+          const char *key = apr_psprintf(result_pool, "%d/%s",
+                                         element->parent_eid, element->name);
+          name_clash_conflict_t *c;
+
+          c = svn_hash_gets(clashes, key);
+          if (!c)
+            {
+              c = name_clash_conflict_create(
+                    element->parent_eid, element->name,
+                    result_pool);
+              svn_hash_sets(clashes, key, c);
+            }
+          /* Only the keys of c->elements matter; the value is an
+             arbitrary non-null sentinel (the address of the local
+             variable C) and must never be dereferenced. */
+          svn_eid__hash_set(c->elements, eid, &c);
+          svn_eid__hash_set(c->elements, prev_eid, &c);
+        }
+      prev_eid = eid;
+      prev_element = element;
+    }
+
+  *clashes_p = clashes;
+  return SVN_NO_ERROR;
+}
+
+/* For each element in the cycle starting at ONE_EID in EIDS_VISITED,
+ * record an entry in CYCLES[this_eid] mapping to a cycle_conflict_t.
+ * Each such new entry will point to the same cycle_conflict_t.  This
+ * cycle_conflict_t will contain the list of elements in the cycle.
+ *
+ * ONE_EID should identify a member of a simple cycle, not an element
+ * that merely has a parent or ancestor in a simple cycle.
+ */
+static svn_error_t *
+record_cycle(apr_hash_t *cycles,
+             apr_hash_t *eids_visited,
+             int one_eid,
+             apr_pool_t *result_pool)
+{
+  cycle_conflict_t *c = cycle_conflict_create(result_pool);
+  int this_eid = one_eid;
+
+  do
+    {
+      svn_element__content_t *element
+        = svn_eid__hash_get(eids_visited, this_eid);
+
+      /* Every member maps to the same shared conflict object. */
+      svn_eid__hash_set(cycles, this_eid, c);
+      svn_eid__hash_set(c->elements, this_eid, element);
+
+      /* Follow the parent chain; a member of a simple cycle always has
+         a parent, hence the assertion. */
+      this_eid = element->parent_eid;
+      assert(this_eid != -1);
+    }
+  while (this_eid != one_eid);
+
+  return SVN_NO_ERROR;
+}
+
+/* Return all (eid -> cycle_conflict_t) cycle conflicts in BRANCH.
+ *
+ * ### This implementation is crude: it finds all cycles, but doesn't
+ * report them minimally.  It reports each element that leads to a cycle,
+ * without isolating the minimal cycles nor eliminating duplicates.
+ */
+static svn_error_t *
+detect_cycles(apr_hash_t **cycles_p,
+              svn_branch__state_t *branch,
+              apr_pool_t *result_pool,
+              apr_pool_t *scratch_pool)
+{
+  apr_hash_t *cycles = apr_hash_make(result_pool);
+  apr_hash_index_t *hi;
+  svn_element__tree_t *elements;
+
+  SVN_ERR(svn_branch__state_get_elements(branch, &elements, scratch_pool));
+  for (hi = apr_hash_first(scratch_pool, elements->e_map);
+       hi; hi = apr_hash_next(hi))
+    {
+      int eid = svn_eid__hash_this_key(hi);
+      svn_element__content_t *element = apr_hash_this_val(hi);
+      apr_hash_t *eids_visited;
+      int this_eid;
+
+      /* If the element EID is already recorded as a member of a cycle,
+         there's nothing more to do for it. */
+      if (svn_eid__hash_get(cycles, eid))
+        {
+          continue;
+        }
+
+      eids_visited = apr_hash_make(scratch_pool);
+
+      /* See if we can trace the parentage of EID back to the branch root
+         without finding a cycle.  If we find a cycle, store a conflict. */
+      for (this_eid = eid;
+           element = svn_eid__hash_get(elements->e_map, this_eid),
+             element && element->parent_eid != -1;
+           this_eid = element->parent_eid)
+        {
+          svn_eid__hash_set(eids_visited, this_eid, element);
+
+          /* If the element EID is attached to an element of a previously
+             detected cycle, then it's not interesting in itself. */
+          if (svn_eid__hash_get(cycles, element->parent_eid))
+            {
+              break;
+            }
+          /* If this element's parent-EID is already in the path of EIDs
+             visited from EID to the root, then we have found a cycle. */
+          if (svn_eid__hash_get(eids_visited, element->parent_eid))
+            {
+              SVN_ERR(record_cycle(cycles, eids_visited, this_eid,
+                                   result_pool));
+              break;
+            }
+        }
+    }
+
+  *cycles_p = cycles;
+  return SVN_NO_ERROR;
+}
+
+/* Return all (eid -> orphan_conflict_t) orphan conflicts in BRANCH.
+ *
+ * An orphan is any non-root element whose parent eid is not present in
+ * the branch's element tree.
+ */
+static svn_error_t *
+detect_orphans(apr_hash_t **orphans_p,
+               svn_branch__state_t *branch,
+               apr_pool_t *result_pool,
+               apr_pool_t *scratch_pool)
+{
+  apr_hash_t *orphans = apr_hash_make(result_pool);
+  apr_hash_index_t *hi;
+  svn_element__tree_t *elements;
+
+  SVN_ERR(svn_branch__state_get_elements(branch, &elements, scratch_pool));
+  for (hi = apr_hash_first(scratch_pool, elements->e_map);
+       hi; hi = apr_hash_next(hi))
+    {
+      int eid = svn_eid__hash_this_key(hi);
+      svn_element__content_t *element = apr_hash_this_val(hi);
+
+      if (eid != elements->root_eid
+          && ! svn_element__tree_get(elements, element->parent_eid))
+        {
+          orphan_conflict_t *c;
+
+          c = orphan_conflict_create(element, result_pool);
+          svn_eid__hash_set(orphans, eid, c);
+        }
+    }
+
+  *orphans_p = orphans;
+  return SVN_NO_ERROR;
+}
+
+/* Merge the subtrees at SRC and TGT, relative to their common ancestor
+ * YCA, applying the result to TGT's branch; report conflicts in
+ * *CONFLICT_STORAGE_P (allocated in RESULT_POOL).
+ *
+ * The elements to merge are the union of the elements in the three input
+ * subtrees (SRC, TGT, YCA).
+ *
+ * Merge any sub-branches in the same way, recursively.
+ *
+ * ### TODO: Store the merge result separately, without overwriting the
+ *     target input state, so that the three input states are all available
+ *     for reference while resolving conflicts.
+ */
+static svn_error_t *
+branch_merge_subtree_r(svn_branch__txn_t *edit_txn,
+                       svn_branch__state_t *edit_branch,
+                       conflict_storage_t **conflict_storage_p,
+                       const svn_branch__el_rev_id_t *src,
+                       const svn_branch__el_rev_id_t *tgt,
+                       const svn_branch__el_rev_id_t *yca,
+                       apr_pool_t *result_pool,
+                       apr_pool_t *scratch_pool)
+{
+  svn_branch__subtree_t *s_src, *s_tgt, *s_yca;
+  apr_hash_t *diff_yca_src, *diff_yca_tgt;
+  apr_hash_t *e_conflicts = apr_hash_make(result_pool);
+  conflict_storage_t *conflict_storage = conflict_storage_create(result_pool);
+  svn_element__tree_t *src_elements, *tgt_elements, *yca_elements;
+  apr_hash_t *all_elements;
+  svn_eid__hash_iter_t *ei;
+  /* Accept all the "safe" double-change cases by default. */
+  const merge_conflict_policy_t policy = { TRUE, TRUE, TRUE, TRUE, TRUE };
+  apr_pool_t *iterpool = svn_pool_create(scratch_pool);
+
+  /*SVN_DBG(("merge src: r%2ld %s e%3d",
+           src->rev,
+           svn_branch__get_id(src->branch, scratch_pool), src->eid));*/
+  /*SVN_DBG(("merge tgt: r%2ld %s e%3d",
+           tgt->rev,
+           svn_branch__get_id(tgt->branch, scratch_pool), tgt->eid));*/
+  /*SVN_DBG(("merge yca: r%2ld %s e%3d",
+           yca->rev,
+           svn_branch__get_id(yca->branch, scratch_pool), yca->eid));*/
+
+  svnmover_notify_v("merging into branch %s",
+                    edit_branch->bid);
+  /*
+      for (eid, diff1) in element_differences(YCA, FROM):
+        diff2 = element_diff(eid, YCA, TO)
+        if diff1 and diff2:
+          result := element_merge(diff1, diff2)
+        elif diff1:
+          result := diff1.right
+        # else no change
+   */
+  SVN_ERR(svn_branch__get_subtree(src->branch, &s_src, src->eid, scratch_pool));
+  SVN_ERR(svn_branch__get_subtree(tgt->branch, &s_tgt, tgt->eid, scratch_pool));
+  SVN_ERR(svn_branch__get_subtree(yca->branch, &s_yca, yca->eid, scratch_pool));
+
+  /* ALL_ELEMENTS enumerates the elements in union of subtrees YCA,SRC,TGT. */
+  all_elements = hash_overlay(s_src->tree->e_map,
+                              s_tgt->tree->e_map);
+  all_elements = hash_overlay(s_yca->tree->e_map,
+                              all_elements);
+
+  SVN_ERR(svn_branch__state_get_elements(src->branch, &src_elements,
+                                         scratch_pool));
+  SVN_ERR(svn_branch__state_get_elements(tgt->branch, &tgt_elements,
+                                         scratch_pool));
+  SVN_ERR(svn_branch__state_get_elements(yca->branch, &yca_elements,
+                                         scratch_pool));
+
+  /* Find the two changes for each element that is in any of the subtrees,
+     even for an element that is (for example) not in YCA or SRC but has
+     been moved into TGT. */
+  SVN_ERR(svnmover_element_differences(&diff_yca_src,
+                                       yca_elements, src_elements,
+                                       all_elements,
+                                       scratch_pool, scratch_pool));
+  /* ### We only need to know about YCA:TGT differences for elements that
+         differ in YCA:SRC, but right now we ask for all differences. */
+  SVN_ERR(svnmover_element_differences(&diff_yca_tgt,
+                                       yca_elements, tgt_elements,
+                                       all_elements,
+                                       scratch_pool, scratch_pool));
+
+  for (SVN_EID__HASH_ITER_SORTED_BY_EID(ei, all_elements, scratch_pool))
+    {
+      int eid = ei->eid;
+      svn_element__content_t **e_yca_src
+        = svn_eid__hash_get(diff_yca_src, eid);
+      svn_element__content_t **e_yca_tgt
+        = svn_eid__hash_get(diff_yca_tgt, eid);
+      svn_element__content_t *e_yca;
+      svn_element__content_t *e_src;
+      svn_element__content_t *e_tgt;
+      svn_element__content_t *result;
+      element_merge3_conflict_t *conflict;
+
+      svn_pool_clear(iterpool);
+
+      /* If an element hasn't changed in the source branch, there is
+         no need to do anything with it in the target branch.  We could
+         use element_merge() for any case where at least one of (SRC,
+         TGT, YCA) exists, but we choose to skip it when SRC == YCA. */
+      if (! e_yca_src)
+        {
+          /* Still need to merge any subbranch linked to this element.
+             There were no changes to the link element but that doesn't
+             mean there were no changes to the linked branch. */
+          SVN_ERR(merge_subbranch(edit_txn, edit_branch,
+                                  src, tgt, yca, eid, iterpool));
+
+          continue;
+        }
+
+      e_yca = e_yca_src[0];
+      e_src = e_yca_src[1];
+      /* No YCA:TGT diff recorded means TGT's content equals YCA's. */
+      e_tgt = e_yca_tgt ? e_yca_tgt[1] : e_yca_src[0];
+
+      /* If some but not all of the three subtree-root elements are branch
+         roots, then we will see the parentage of this element changing to
+         or from 'no parent' in one or both sides of the merge.  We want to
+         ignore this part of the difference, as parentage of a subtree root
+         element is by definition not part of a 'subtree', so blank it out.
+         (If we merged it, it could break the single-rooted-tree invariant
+         of the target branch.)
+       */
+      if (is_branch_root_element(src->branch, eid)
+          || is_branch_root_element(tgt->branch, eid)
+          || is_branch_root_element(yca->branch, eid))
+        {
+          e_src = svn_element__content_create(
+                    e_tgt->parent_eid, e_tgt->name, e_src->payload, iterpool);
+          e_yca = svn_element__content_create(
+                    e_tgt->parent_eid, e_tgt->name, e_yca->payload, iterpool);
+        }
+
+      element_merge(&result, &conflict,
+                    eid, e_src, e_tgt, e_yca,
+                    &policy,
+                    scratch_pool, scratch_pool);
+
+      if (conflict)
+        {
+          svnmover_notify_v("!    e%d <conflict>", eid);
+          svn_eid__hash_set(e_conflicts, eid,
+                            element_merge3_conflict_dup(conflict, result_pool));
+        }
+      else
+        {
+          /* Notify what happened to the element: modified/moved,
+             deleted, or added. */
+          if (e_tgt && result)
+            {
+              svnmover_notify_v("M/V  e%d %s%s",
+                                eid, result->name,
+                                subbranch_str(tgt->branch, eid, iterpool));
+            }
+          else if (e_tgt)
+            {
+              svnmover_notify_v("D    e%d %s%s",
+                                eid, e_yca->name,
+                                subbranch_str(yca->branch, eid, iterpool));
+
+              /* ### If this is a subbranch-root element being deleted, shouldn't
+                 we see if there were any changes to be merged in the subbranch,
+                 and raise a delete-vs-edit conflict if so? */
+            }
+          else if (result)
+            {
+              svnmover_notify_v("A    e%d %s%s",
+                                eid, result->name,
+                                subbranch_str(src->branch, eid, iterpool));
+            }
+
+          SVN_ERR(svn_branch__state_set_element(tgt->branch, eid,
+                                                result, iterpool));
+
+          if (result)
+            {
+              SVN_ERR(merge_subbranch(edit_txn, edit_branch,
+                                      src, tgt, yca, eid, iterpool));
+            }
+        }
+    }
+  svn_pool_destroy(iterpool);
+
+  /* Collect the conflicts; post-merge tree checks run on the (merged)
+     target branch. */
+  conflict_storage->yca_branch = yca->branch;
+  conflict_storage->src_branch = src->branch;
+  conflict_storage->tgt_branch = tgt->branch;
+  conflict_storage->merged_branch = tgt->branch; /* ### should be != tgt */
+  conflict_storage->element_merge_conflicts = e_conflicts;
+  SVN_ERR(detect_clashes(&conflict_storage->name_clash_conflicts,
+                         tgt->branch,
+                         result_pool, scratch_pool));
+  SVN_ERR(detect_cycles(&conflict_storage->cycle_conflicts,
+                        tgt->branch,
+                        result_pool, scratch_pool));
+  SVN_ERR(detect_orphans(&conflict_storage->orphan_conflicts,
+                         tgt->branch,
+                         result_pool, scratch_pool));
+
+  svnmover_notify_v("merging into branch %s -- finished",
+                    svn_branch__get_id(tgt->branch, scratch_pool));
+
+  *conflict_storage_p = conflict_storage;
+  return SVN_NO_ERROR;
+}
+
+/* Public entry point: three-way merge of SRC into TGT relative to YCA,
+ * editing through EDIT_TXN/EDIT_BRANCH.  If CONFLICT_STORAGE_P is
+ * non-null, set it to the collected conflicts, or to null when the merge
+ * completed without conflicts.
+ */
+svn_error_t *
+svnmover_branch_merge(svn_branch__txn_t *edit_txn,
+                      svn_branch__state_t *edit_branch,
+                      conflict_storage_t **conflict_storage_p,
+                      svn_branch__el_rev_id_t *src,
+                      svn_branch__el_rev_id_t *tgt,
+                      svn_branch__el_rev_id_t *yca,
+                      apr_pool_t *result_pool,
+                      apr_pool_t *scratch_pool)
+{
+  conflict_storage_t *conflicts;
+
+  /*SVN_ERR(verify_exists_in_branch(from, scratch_pool));*/
+  /*SVN_ERR(verify_exists_in_branch(to, scratch_pool));*/
+  /*SVN_ERR(verify_exists_in_branch(yca, scratch_pool));*/
+  /*SVN_ERR(verify_not_subbranch_root(from, scratch_pool));*/
+  /*SVN_ERR(verify_not_subbranch_root(to, scratch_pool));*/
+  /*SVN_ERR(verify_not_subbranch_root(yca, scratch_pool));*/
+
+  SVN_ERR(branch_merge_subtree_r(edit_txn, edit_branch,
+                                 &conflicts,
+                                 src, tgt, yca,
+                                 result_pool, scratch_pool));
+
+  if (conflict_storage_p)
+    {
+      if (svnmover_any_conflicts(conflicts))
+        {
+          *conflict_storage_p = conflicts;
+        }
+      else
+        {
+          *conflict_storage_p = NULL;
+        }
+    }
+  return SVN_NO_ERROR;
+}
+
diff --git a/tools/dev/svnmover/ra.c b/tools/dev/svnmover/ra.c
new file mode 100644
index 0000000..f7293b2
--- /dev/null
+++ b/tools/dev/svnmover/ra.c
@@ -0,0 +1,586 @@
+/*
+ * ra_loader.c: logic for loading different RA library implementations
+ *
+ * ====================================================================
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ * ====================================================================
+ */
+
+/* ==================================================================== */
+
+/*** Includes. ***/
+#define APR_WANT_STRFUNC
+#include <apr_want.h>
+
+#include <apr.h>
+#include <apr_strings.h>
+#include <apr_pools.h>
+#include <apr_hash.h>
+#include <apr_uri.h>
+
+#include "svn_hash.h"
+#include "svn_types.h"
+#include "svn_error.h"
+#include "svn_delta.h"
+#include "svn_ra.h"
+#include "svn_dirent_uri.h"
+#include "svn_props.h"
+#include "svn_iter.h"
+
+#include "private/svn_branch_compat.h"
+#include "private/svn_branch_repos.h"
+#include "private/svn_ra_private.h"
+#include "private/svn_delta_private.h"
+#include "private/svn_string_private.h"
+#include "svnmover.h"
+#include "svn_private_config.h"
+
+
+/* Read the branching info string *VALUE belonging to revision REVISION.
+ *
+ * If BRANCH_INFO_DIR is non-null, read it from the file
+ * "branch-info-rREVISION" in that directory; if the file cannot be
+ * opened (for example because it does not exist), set *VALUE to null
+ * and return success.  Otherwise read it from the revision property
+ * "svn-br-info" of REVISION via RA_SESSION.
+ *
+ * Allocate *VALUE in RESULT_POOL.
+ */
+static svn_error_t *
+read_rev_prop(svn_string_t **value,
+              svn_ra_session_t *ra_session,
+              const char *branch_info_dir,
+              svn_revnum_t revision,
+              apr_pool_t *result_pool)
+{
+  apr_pool_t *scratch_pool = result_pool;
+
+  if (branch_info_dir)
+    {
+      const char *file_path;
+      svn_stream_t *stream;
+      svn_error_t *err;
+
+      file_path = svn_dirent_join(branch_info_dir,
+                                  apr_psprintf(scratch_pool, "branch-info-r%ld",
+                                               revision), scratch_pool);
+      err = svn_stream_open_readonly(&stream, file_path, scratch_pool,
+                                     scratch_pool);
+      if (err)
+        {
+          /* ### This swallows *any* open error, not just "not found";
+             treat a missing/unreadable file as "no info recorded". */
+          svn_error_clear(err);
+          *value = NULL;
+          return SVN_NO_ERROR;
+        }
+      /* (A redundant 'SVN_ERR(err);' was removed here: ERR is
+         necessarily SVN_NO_ERROR at this point.) */
+      SVN_ERR(svn_string_from_stream2(value, stream, 0, result_pool));
+    }
+  else
+    {
+      SVN_ERR(svn_ra_rev_prop(ra_session, revision, "svn-br-info", value,
+                              result_pool));
+    }
+  return SVN_NO_ERROR;
+}
+
+/* Store the branching info string VALUE belonging to revision REVISION.
+ *
+ * If BRANCH_INFO_DIR is non-null, write VALUE to the file
+ * "branch-info-rREVISION" in that directory; if the first attempt to
+ * create the file fails, create the directory and retry once.
+ * Otherwise store VALUE in the revision property "svn-br-info" of
+ * REVISION via RA_SESSION.
+ */
+static svn_error_t *
+write_rev_prop(svn_ra_session_t *ra_session,
+               const char *branch_info_dir,
+               svn_revnum_t revision,
+               svn_string_t *value,
+               apr_pool_t *scratch_pool)
+{
+  if (branch_info_dir)
+    {
+      const char *file_path;
+      svn_error_t *err;
+
+      file_path = svn_dirent_join(branch_info_dir,
+                                  apr_psprintf(scratch_pool, "branch-info-r%ld",
+                                               revision), scratch_pool);
+      err = svn_io_file_create(file_path, value->data, scratch_pool);
+      if (err)
+        {
+          /* Assume the failure was a missing BRANCH_INFO_DIR: create the
+             directory and retry the file creation once.
+             ### Other causes of failure take this path too. */
+          svn_error_clear(err);
+          SVN_ERR(svn_io_dir_make(branch_info_dir, APR_FPROT_OS_DEFAULT,
+                                  scratch_pool));
+          err = svn_io_file_create(file_path, value->data, scratch_pool);
+        }
+      SVN_ERR(err);
+    }
+  else
+    {
+      SVN_ERR(svn_ra_change_rev_prop2(ra_session, revision, "svn-br-info",
+                                      NULL, value, scratch_pool));
+    }
+
+  return SVN_NO_ERROR;
+}
+
+/* Create a new revision-root object and read the move-tracking /
+ * branch-tracking metadata of REVISION from the repository into it.
+ *
+ * Read the metadata via RA_SESSION, or from BRANCH_INFO_DIR if that is
+ * non-null (see read_rev_prop()).  If no metadata is found for r0, write
+ * out and use the default r0 metadata; for any other revision, missing
+ * metadata is an error.
+ *
+ * Set *TXN_P to the parsed txn, allocated in RESULT_POOL.
+ */
+static svn_error_t *
+branch_revision_fetch_info(svn_branch__txn_t **txn_p,
+                           svn_branch__repos_t *repos,
+                           svn_ra_session_t *ra_session,
+                           const char *branch_info_dir,
+                           svn_revnum_t revision,
+                           apr_pool_t *result_pool,
+                           apr_pool_t *scratch_pool)
+{
+  svn_string_t *value;
+  svn_stream_t *stream;
+  svn_branch__txn_t *txn;
+
+  SVN_ERR_ASSERT(SVN_IS_VALID_REVNUM(revision));
+
+  /* Read initial state from repository */
+  SVN_ERR(read_rev_prop(&value, ra_session, branch_info_dir, revision,
+                        scratch_pool));
+  if (! value && revision == 0)
+    {
+      /* An uninitialized repository: install the default r0 metadata. */
+      value = svn_branch__get_default_r0_metadata(scratch_pool);
+      /*SVN_DBG(("fetch_per_revision_info(r%ld): LOADED DEFAULT INFO:\n%s",
+               revision, value->data));*/
+      SVN_ERR(write_rev_prop(ra_session, branch_info_dir, revision, value,
+                             scratch_pool));
+    }
+  else if (! value)
+    {
+      return svn_error_createf(SVN_BRANCH__ERR, NULL,
+                               _("Move-tracking metadata not found in r%ld "
+                                 "in this repository. Run svnmover on an "
+                                 "empty repository to initialize the "
+                                 "metadata"), revision);
+    }
+  stream = svn_stream_from_string(value, scratch_pool);
+
+  SVN_ERR(svn_branch__txn_parse(&txn, repos, stream,
+                                result_pool, scratch_pool));
+
+  /* Self-test: writing out the info should produce exactly the same string. */
+  {
+    svn_stringbuf_t *buf = svn_stringbuf_create_empty(scratch_pool);
+
+    stream = svn_stream_from_stringbuf(buf, scratch_pool);
+    SVN_ERR(svn_branch__txn_serialize(txn, stream, scratch_pool));
+    SVN_ERR(svn_stream_close(stream));
+
+    SVN_ERR_ASSERT(svn_string_compare(value,
+                                      svn_stringbuf__morph_into_string(buf)));
+  }
+
+  *txn_p = txn;
+  return SVN_NO_ERROR;
+}
+
+/* Fetch all element payloads in TXN.
+ *
+ * For each element of each branch in TXN, except subbranch-root
+ * elements, fetch the payload via FETCH_FUNC/FETCH_BATON, allocating
+ * it in RESULT_POOL.
+ */
+static svn_error_t *
+txn_fetch_payloads(svn_branch__txn_t *txn,
+                   svn_branch__compat_fetch_func_t fetch_func,
+                   void *fetch_baton,
+                   apr_pool_t *result_pool,
+                   apr_pool_t *scratch_pool)
+{
+  apr_array_header_t *branches = svn_branch__txn_get_branches(txn, scratch_pool);
+  int i;
+
+  /* Read payload of each element.
+     (In a real implementation, of course, we'd delay this until demanded.) */
+  for (i = 0; i < branches->nelts; i++)
+    {
+      svn_branch__state_t *branch = APR_ARRAY_IDX(branches, i, void *);
+      svn_element__tree_t *element_tree;
+      apr_hash_index_t *hi;
+
+      SVN_ERR(svn_branch__state_get_elements(branch, &element_tree,
+                                             scratch_pool));
+      for (hi = apr_hash_first(scratch_pool, element_tree->e_map);
+           hi; hi = apr_hash_next(hi))
+        {
+          int eid = svn_eid__hash_this_key(hi);
+          svn_element__content_t *element /*= apr_hash_this_val(hi)*/;
+
+          SVN_ERR(svn_branch__state_get_element(branch, &element,
+                                                eid, scratch_pool));
+          /* Subbranch roots carry no payload of their own; skip them. */
+          if (! element->payload->is_subbranch_root)
+            {
+              SVN_ERR(svn_branch__compat_fetch(&element->payload,
+                                               txn,
+                                               element->payload->branch_ref,
+                                               fetch_func, fetch_baton,
+                                               result_pool, scratch_pool));
+            }
+        }
+    }
+
+  return SVN_NO_ERROR;
+}
+
+/* Create a new repository object and read the move-tracking /
+ * branch-tracking metadata of every revision, r0 through head, from the
+ * repository into it.
+ */
+static svn_error_t *
+branch_repos_fetch_info(svn_branch__repos_t **repos_p,
+                        svn_ra_session_t *ra_session,
+                        const char *branch_info_dir,
+                        svn_branch__compat_fetch_func_t fetch_func,
+                        void *fetch_baton,
+                        apr_pool_t *result_pool,
+                        apr_pool_t *scratch_pool)
+{
+  svn_branch__repos_t *repos = svn_branch__repos_create(result_pool);
+  svn_revnum_t head_revision;
+  svn_revnum_t rev;
+
+  SVN_ERR(svn_ra_get_latest_revnum(ra_session, &head_revision, scratch_pool));
+
+  /* Load the branching txn of each revision in turn, record it in REPOS,
+     and pull in the payload of every element it mentions. */
+  for (rev = 0; rev <= head_revision; rev++)
+    {
+      svn_branch__txn_t *rev_txn;
+
+      SVN_ERR(branch_revision_fetch_info(&rev_txn, repos, ra_session,
+                                         branch_info_dir, rev,
+                                         result_pool, scratch_pool));
+      SVN_ERR(svn_branch__repos_add_revision(repos, rev_txn));
+      SVN_ERR(txn_fetch_payloads(rev_txn, fetch_func, fetch_baton,
+                                 result_pool, scratch_pool));
+    }
+
+  *repos_p = repos;
+  return SVN_NO_ERROR;
+}
+
+/* Return a mutable state based on revision BASE_REVISION in REPOS.
+ *
+ * Load the txn of BASE_REVISION (including element payloads), mark it
+ * as an uncommitted txn whose base is BASE_REVISION, and re-parent the
+ * history of every branch onto the corresponding branch in
+ * BASE_REVISION.
+ *
+ * Set *TXN_P to the new txn, allocated in RESULT_POOL.
+ */
+static svn_error_t *
+branch_get_mutable_state(svn_branch__txn_t **txn_p,
+                         svn_branch__repos_t *repos,
+                         svn_ra_session_t *ra_session,
+                         const char *branch_info_dir,
+                         svn_revnum_t base_revision,
+                         svn_branch__compat_fetch_func_t fetch_func,
+                         void *fetch_baton,
+                         apr_pool_t *result_pool,
+                         apr_pool_t *scratch_pool)
+{
+  svn_branch__txn_t *txn;
+  apr_array_header_t *branches;
+  int i;
+
+  SVN_ERR_ASSERT(SVN_IS_VALID_REVNUM(base_revision));
+
+  SVN_ERR(branch_revision_fetch_info(&txn,
+                                     repos, ra_session, branch_info_dir,
+                                     base_revision,
+                                     result_pool, scratch_pool));
+  SVN_ERR_ASSERT(txn->rev == base_revision);
+  SVN_ERR(txn_fetch_payloads(txn, fetch_func, fetch_baton,
+                             result_pool, scratch_pool));
+
+  /* Update all the 'predecessor' info to point to the BASE_REVISION instead
+     of to that revision's predecessor. */
+  txn->base_rev = base_revision;
+  txn->rev = SVN_INVALID_REVNUM;
+
+  branches = svn_branch__txn_get_branches(txn, scratch_pool);
+  for (i = 0; i < branches->nelts; i++)
+    {
+      svn_branch__state_t *b = APR_ARRAY_IDX(branches, i, void *);
+      /* Each branch gets a fresh one-entry history ... */
+      svn_branch__history_t *history
+        = svn_branch__history_create_empty(result_pool);
+
+      /* Set each branch's parent to the branch in the base rev */
+      svn_branch__rev_bid_t *parent
+        = svn_branch__rev_bid_create(base_revision,
+                                     svn_branch__get_id(b, scratch_pool),
+                                     result_pool);
+
+      svn_hash_sets(history->parents,
+                    apr_pstrdup(result_pool, b->bid), parent);
+      SVN_ERR(svn_branch__state_set_history(b, history, scratch_pool));
+    }
+
+  *txn_p = txn;
+  return SVN_NO_ERROR;
+}
+
+/* Serialize the move-tracking / branch-tracking metadata of TXN and
+ * store it in the repository (or in the local branch-info directory;
+ * see write_rev_prop()).  TXN->rev is the newly committed revision
+ * number.
+ */
+static svn_error_t *
+store_repos_info(svn_branch__txn_t *txn,
+                 svn_ra_session_t *ra_session,
+                 const char *branch_info_dir,
+                 apr_pool_t *scratch_pool)
+{
+  svn_stringbuf_t *serialized = svn_stringbuf_create_empty(scratch_pool);
+  svn_stream_t *out = svn_stream_from_stringbuf(serialized, scratch_pool);
+
+  SVN_ERR(svn_branch__txn_serialize(txn, out, scratch_pool));
+  SVN_ERR(svn_stream_close(out));
+
+  SVN_ERR(write_rev_prop(ra_session, branch_info_dir, txn->rev,
+                         svn_stringbuf__morph_into_string(serialized),
+                         scratch_pool));
+
+  return SVN_NO_ERROR;
+}
+
+/* Baton for commit_callback_wrapper(). */
+struct ccw_baton
+{
+  /* The caller's original commit callback, invoked after the branching
+     metadata has been stored; may be null. */
+  svn_commit_callback2_t original_callback;
+  void *original_baton;
+
+  /* Session through which the metadata revprop is written. */
+  svn_ra_session_t *session;
+  /* Local directory for branch-info files, or null to use revprops. */
+  const char *branch_info_dir;
+  /* Branching txn to record against the committed revision; null if
+     this commit carries no element-branching info. */
+  svn_branch__txn_t *branching_txn;
+};
+
+/* Wrapper which stores the branching/move-tracking info after a commit
+ * and then chains to the caller's original commit callback, if any.
+ *
+ * Implements svn_commit_callback2_t; BATON is a 'struct ccw_baton'.
+ */
+static svn_error_t *
+commit_callback_wrapper(const svn_commit_info_t *commit_info,
+                        void *baton,
+                        apr_pool_t *pool)
+{
+  struct ccw_baton *ccwb = baton;
+
+  /* if this commit used element-branching info, store the new info */
+  if (ccwb->branching_txn)
+    {
+      svn_branch__repos_t *repos = ccwb->branching_txn->repos;
+
+      /* Record the txn against the newly committed revision number. */
+      ccwb->branching_txn->rev = commit_info->revision;
+      SVN_ERR(svn_branch__repos_add_revision(repos, ccwb->branching_txn));
+      SVN_ERR(store_repos_info(ccwb->branching_txn, ccwb->session,
+                               ccwb->branch_info_dir, pool));
+    }
+
+  /* call the wrapped callback */
+  if (ccwb->original_callback)
+    {
+      SVN_ERR(ccwb->original_callback(commit_info, ccwb->original_baton, pool));
+    }
+
+  return SVN_NO_ERROR;
+}
+
+
+/* Arrange for the branching/move-tracking metadata in BRANCHING_TXN to
+ * be stored (via SESSION, or in BRANCH_INFO_DIR) when a commit
+ * completes.
+ *
+ * Set *CALLBACK and *CALLBACK_BATON to a commit callback that first
+ * stores the metadata and then calls ORIGINAL_CALLBACK with
+ * ORIGINAL_BATON (if ORIGINAL_CALLBACK is non-null).
+ *
+ * (The comment previously here described REPOS_ROOT fix-ups; it was
+ * copied from ra_loader.c and did not describe this function.) */
+static void
+remap_commit_callback(svn_commit_callback2_t *callback,
+                      void **callback_baton,
+                      svn_ra_session_t *session,
+                      svn_branch__txn_t *branching_txn,
+                      const char *branch_info_dir,
+                      svn_commit_callback2_t original_callback,
+                      void *original_baton,
+                      apr_pool_t *result_pool)
+{
+  /* Allocate this in RESULT_POOL, since the callback will be called
+     long after this function has returned. */
+  struct ccw_baton *ccwb = apr_palloc(result_pool, sizeof(*ccwb));
+
+  ccwb->session = session;
+  ccwb->branch_info_dir = apr_pstrdup(result_pool, branch_info_dir);
+  ccwb->branching_txn = branching_txn;
+  ccwb->original_callback = original_callback;
+  ccwb->original_baton = original_baton;
+
+  *callback = commit_callback_wrapper;
+  *callback_baton = ccwb;
+}
+
+
+/* Ev3 shims */
+/* Baton for fetch(). */
+struct fb_baton {
+  /* A session parented at the repository root */
+  svn_ra_session_t *session;
+  /* Root URL of the repository. */
+  const char *repos_root_url;
+  /* Original session URL as a relpath below REPOS_ROOT_URL. */
+  const char *session_path;
+};
+
+/* Fetch kind and/or props and/or text and/or children of the node at
+ * REPOS_RELPATH@REVISION into *KIND_P, *PROPS_P, *FILE_TEXT and
+ * *CHILDREN_NAMES respectively.  Any of the output pointers may be null
+ * if the caller does not want that datum; outputs that do not apply to
+ * the node's kind are set to null.  Non-regular properties are filtered
+ * out of *PROPS_P.  Results are allocated in RESULT_POOL.
+ *
+ * Implements svn_branch__compat_fetch_func_t. */
+static svn_error_t *
+fetch(svn_node_kind_t *kind_p,
+      apr_hash_t **props_p,
+      svn_stringbuf_t **file_text,
+      apr_hash_t **children_names,
+      void *baton,
+      const char *repos_relpath,
+      svn_revnum_t revision,
+      apr_pool_t *result_pool,
+      apr_pool_t *scratch_pool)
+{
+  struct fb_baton *fbb = baton;
+  svn_node_kind_t kind;
+  apr_hash_index_t *hi;
+
+  /* Default every requested output to "not applicable". */
+  if (props_p)
+    *props_p = NULL;
+  if (file_text)
+    *file_text = NULL;
+  if (children_names)
+    *children_names = NULL;
+
+  SVN_ERR(svn_ra_check_path(fbb->session, repos_relpath, revision,
+                            &kind, scratch_pool));
+  if (kind_p)
+    *kind_p = kind;
+  if (kind == svn_node_file && (props_p || file_text))
+    {
+      svn_stream_t *file_stream = NULL;
+
+      if (file_text)
+        {
+          /* Capture the file contents into *FILE_TEXT via a stream. */
+          *file_text = svn_stringbuf_create_empty(result_pool);
+          file_stream = svn_stream_from_stringbuf(*file_text, scratch_pool);
+        }
+      SVN_ERR(svn_ra_get_file(fbb->session, repos_relpath, revision,
+                              file_stream, NULL, props_p, result_pool));
+      if (file_text)
+        {
+          SVN_ERR(svn_stream_close(file_stream));
+        }
+    }
+  else if (kind == svn_node_dir && (props_p || children_names))
+    {
+      SVN_ERR(svn_ra_get_dir2(fbb->session,
+                              children_names, NULL, props_p,
+                              repos_relpath, revision,
+                              0 /*minimal child info*/,
+                              result_pool));
+    }
+
+  /* Remove non-regular props */
+  if (props_p && *props_p)
+    {
+      for (hi = apr_hash_first(scratch_pool, *props_p); hi; hi = apr_hash_next(hi))
+        {
+          const char *name = apr_hash_this_key(hi);
+
+          if (svn_property_kind2(name) != svn_prop_regular_kind)
+            svn_hash_sets(*props_p, name, NULL);
+
+        }
+    }
+
+  return SVN_NO_ERROR;
+}
+
+/* Load the branching / move-tracking state of SESSION's repository for
+ * revisions r0 through BASE_REVISION (or through the head revision if
+ * BASE_REVISION is SVN_INVALID_REVNUM).
+ *
+ * Set *BRANCHING_TXN_P to a mutable txn based on BASE_REVISION, and set
+ * *FETCH_FUNC/*FETCH_BATON to a payload-fetching callback bound to a
+ * duplicate session parented at the repository root.
+ *
+ * Read the metadata from BRANCH_INFO_DIR if non-null, else from
+ * revision properties.
+ */
+svn_error_t *
+svn_ra_load_branching_state(svn_branch__txn_t **branching_txn_p,
+                            svn_branch__compat_fetch_func_t *fetch_func,
+                            void **fetch_baton,
+                            svn_ra_session_t *session,
+                            const char *branch_info_dir,
+                            svn_revnum_t base_revision,
+                            apr_pool_t *result_pool,
+                            apr_pool_t *scratch_pool)
+{
+  svn_branch__repos_t *repos;
+  const char *repos_root_url, *session_url, *base_relpath;
+  struct fb_baton *fbb = apr_palloc(result_pool, sizeof (*fbb));
+
+  if (base_revision == SVN_INVALID_REVNUM)
+    {
+      SVN_ERR(svn_ra_get_latest_revnum(session, &base_revision, scratch_pool));
+    }
+
+  /* fetcher */
+  SVN_ERR(svn_ra_get_repos_root2(session, &repos_root_url, result_pool));
+  SVN_ERR(svn_ra_get_session_url(session, &session_url, scratch_pool));
+  base_relpath = svn_uri_skip_ancestor(repos_root_url, session_url, result_pool);
+  SVN_ERR(svn_ra__dup_session(&fbb->session, session, repos_root_url, result_pool, scratch_pool));
+  fbb->session_path = base_relpath;
+  fbb->repos_root_url = repos_root_url;
+  *fetch_func = fetch;
+  *fetch_baton = fbb;
+
+  SVN_ERR(branch_repos_fetch_info(&repos,
+                                  session, branch_info_dir,
+                                  *fetch_func, *fetch_baton,
+                                  result_pool, scratch_pool));
+  SVN_ERR(branch_get_mutable_state(branching_txn_p,
+                                   repos, session, branch_info_dir,
+                                   base_revision,
+                                   *fetch_func, *fetch_baton,
+                                   result_pool, scratch_pool));
+
+  return SVN_NO_ERROR;
+}
+
+/* Begin a commit against SESSION's repository and set *EDIT_TXN_P to an
+ * Ev3 branching txn through which to edit it.
+ *
+ * Load the branching state (from BRANCH_INFO_DIR if non-null, else from
+ * revprops), wrap COMMIT_CALLBACK/COMMIT_BATON so the updated branching
+ * metadata is stored when the commit succeeds, obtain an Ev1 commit
+ * editor (passing REVPROP_TABLE, LOCK_TOKENS and KEEP_LOCKS through),
+ * and convert it to the Ev3 txn via the compat shim.
+ */
+svn_error_t *
+svn_ra_get_commit_txn(svn_ra_session_t *session,
+                      svn_branch__txn_t **edit_txn_p,
+                      apr_hash_t *revprop_table,
+                      svn_commit_callback2_t commit_callback,
+                      void *commit_baton,
+                      apr_hash_t *lock_tokens,
+                      svn_boolean_t keep_locks,
+                      const char *branch_info_dir,
+                      apr_pool_t *pool)
+{
+  svn_branch__txn_t *branching_txn;
+  svn_branch__compat_fetch_func_t fetch_func;
+  void *fetch_baton;
+  const svn_delta_editor_t *deditor;
+  void *dedit_baton;
+  svn_branch__compat_shim_connector_t *shim_connector;
+
+  /* load branching info
+   * ### Currently we always start from a single base revision, never from
+   *     a mixed-rev state */
+  SVN_ERR(svn_ra_load_branching_state(&branching_txn, &fetch_func, &fetch_baton,
+                                      session, branch_info_dir,
+                                      SVN_INVALID_REVNUM /*base_revision*/,
+                                      pool, pool));
+
+  /* arrange for branching info to be stored after commit */
+  remap_commit_callback(&commit_callback, &commit_baton,
+                        session, branching_txn, branch_info_dir,
+                        commit_callback, commit_baton, pool);
+
+  SVN_ERR(svn_ra_get_commit_editor3(session, &deditor, &dedit_baton,
+                                    revprop_table,
+                                    commit_callback, commit_baton,
+                                    lock_tokens, keep_locks, pool));
+
+  /* Convert to Ev3 */
+  {
+    const char *repos_root_url;
+
+    SVN_ERR(svn_ra_get_repos_root2(session, &repos_root_url, pool));
+
+    /*SVN_ERR(svn_delta__get_debug_editor(&deditor, &dedit_baton,
+                                        deditor, dedit_baton, "", pool));*/
+    SVN_ERR(svn_branch__compat_txn_from_delta_for_commit(
+              edit_txn_p,
+              &shim_connector,
+              deditor, dedit_baton, branching_txn,
+              repos_root_url,
+              fetch_func, fetch_baton,
+              NULL, NULL /*cancel*/,
+              pool, pool));
+  }
+
+  return SVN_NO_ERROR;
+}
+
diff --git a/tools/dev/svnmover/scanlog.c b/tools/dev/svnmover/scanlog.c
new file mode 100644
index 0000000..ebf8c53
--- /dev/null
+++ b/tools/dev/svnmover/scanlog.c
@@ -0,0 +1,517 @@
+/*
+ * scanlog.c: scanning the log for moves
+ *
+ * ====================================================================
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ * ====================================================================
+ */
+
+/* ==================================================================== */
+
+
+
+/*** Includes. ***/
+
+#include "svn_hash.h"
+#include "svn_wc.h"
+#include "svn_client.h"
+#include "svn_error.h"
+#include "svn_config.h"
+#include "svn_time.h"
+#include "svn_dirent_uri.h"
+#include "svn_path.h"
+#include "svn_pools.h"
+#include "svn_io.h"
+
+#include "private/svn_client_private.h"
+#include "private/svn_wc_private.h"
+#include "private/svn_ra_private.h"
+
+#include "svnmover.h"
+
+#include "svn_private_config.h"
+
+
+/* From moves-scan-log branch */
+
+/* Allocate and fill in a move-info record describing the move of
+ * MOVED_FROM_REPOS_RELPATH@COPYFROM_REV to MOVED_TO_REPOS_RELPATH,
+ * committed in REVISION, linked to the PREV/NEXT moves of the same
+ * node.  Allocate the result in RESULT_POOL.
+ */
+svn_repos_move_info_t *
+svn_repos_move_info_create(const char *moved_from_repos_relpath,
+                           const char *moved_to_repos_relpath,
+                           svn_revnum_t revision,
+                           svn_revnum_t copyfrom_rev,
+                           svn_repos_move_info_t *prev,
+                           svn_repos_move_info_t *next,
+                           apr_pool_t *result_pool)
+{
+  svn_repos_move_info_t *new_move
+    = apr_palloc(result_pool, sizeof(*new_move));
+
+  new_move->moved_from_repos_relpath = moved_from_repos_relpath;
+  new_move->moved_to_repos_relpath = moved_to_repos_relpath;
+  new_move->copyfrom_rev = copyfrom_rev;
+  new_move->revision = revision;
+  new_move->prev = prev;
+  new_move->next = next;
+
+  return new_move;
+}
+
+/* Return a human-readable description of the chain of moves starting at
+ * FIRST_MOVE.  Each line is prefixed with INDENT.  When the chain holds
+ * more than one move, show the combined (first-source to last-target)
+ * move followed by each individual move; otherwise show the single move
+ * alone.  Allocate the result in RESULT_POOL.
+ */
+const char *
+svn_client__format_move_chain_for_display(svn_repos_move_info_t *first_move,
+                                          const char *indent,
+                                          apr_pool_t *result_pool)
+{
+  const char *s;
+  svn_repos_move_info_t *last_move;
+
+  /* Find the end of the chain. */
+  last_move = first_move;
+  while (last_move->next)
+    last_move = last_move->next;
+
+  if (last_move != first_move)
+    {
+      svn_repos_move_info_t *this_move;
+
+      s = apr_psprintf(result_pool,
+                       _("Combined move:\n%s  %s@%ld -> %s\n"
+                         "%sIndividual moves:\n"),
+                       indent, first_move->moved_from_repos_relpath,
+                       first_move->copyfrom_rev,
+                       last_move->moved_to_repos_relpath, indent);
+
+      this_move = first_move;
+      do
+        {
+          s = apr_pstrcat(result_pool, s,
+                          apr_psprintf(
+                            result_pool, _("%s  [r%ld] %s@%ld -> %s\n"),
+                            indent,
+                            this_move->revision,
+                            this_move->moved_from_repos_relpath,
+                            this_move->copyfrom_rev,
+                            this_move->moved_to_repos_relpath),
+                          (char *)NULL);
+          this_move = this_move->next;
+        }
+      while (this_move);
+    }
+  else
+    s = apr_psprintf(result_pool, _(" [r%ld] %s@%ld -> %s\n"),
+                     first_move->revision,
+                     first_move->moved_from_repos_relpath,
+                     first_move->copyfrom_rev,
+                     first_move->moved_to_repos_relpath);
+
+  return s;
+}
+
+/* Baton for scan_moves_log_receiver(). */
+typedef struct scan_moves_log_receiver_baton {
+  /*const char *anchor_abspath;*/
+  /* Client context (used for notification and for opening RA sessions
+     while checking ancestry). */
+  svn_client_ctx_t *ctx;
+  /* First and last revision covered by the log scan. */
+  svn_revnum_t start;
+  svn_revnum_t end;
+  /* Session used to resolve repository root / session URLs. */
+  svn_ra_session_t *ra_session;
+
+  /* The moved nodes hash to be populated.
+   * Maps a revision number to an array of svn_repos_move_info_t
+   * objects describing moves which happened in the revision.
+   *
+   * Given a sequence of moves which happened in given revisions, such as:
+   *   rA: mv x->z
+   *   rA: mv a->b
+   *   rB: mv b->c
+   *   rC: mv c->d
+   * we map each revision number to all moves which happened in the
+   * revision, which looks as follows:
+   *   rA : [(rA, x->z), (rA, a->b)]
+   *   rB : [(rB, b->c)]
+   *   rC : [(rC, c->d)]
+   * This allows an update to find relevant moves based on the base
+   * revision of a node (during updates the base revision of each node
+   * in the working copy is arbitrary so we might not know the nodes 'a'
+   * and 'x' under these names).
+   * Additionally, all moves pertaining to the same node are chained into a
+   * doubly-linked list via 'next' and 'prev' pointers (see definition of
+   * svn_repos_move_info_t).
+   * This way, an update can look up all moves relevant to a node, forwards
+   * or backwards in history, once it has located a relevant move in the chain.
+   * This can be visualized as follows:
+   *   rA : [(rA, x->z, prev=>NULL, next=>NULL),
+   *         (rA, a->b, prev=>NULL, next=>(rB, b->c))]
+   *   rB : [(rB, b->c), prev=>(rA, a->b), next=>(rC, c->d)]
+   *   rC : [(rC, c->d), prev=>(rB, c->d), next=>NULL]
+   */
+  apr_hash_t *moves;
+
+  /* Temporary map of move-target paths to repos_move_info_t.
+   * Used to link multiple moves of the same node across revisions. */
+  apr_hash_t *moves_by_target_path;
+} scan_moves_log_receiver_baton;
+
+/* Details of one copy operation found in a revision's changed paths. */
+typedef struct copy_info {
+  /* Copy destination repos relpath (no leading '/'). */
+  const char *copyto_path;
+  /* Copy source repos relpath and peg revision. */
+  const char *copyfrom_path;
+  svn_revnum_t copyfrom_rev;
+} copy_info;
+
+
+/* Set *RELATED to true if the deleted node at repository relpath
+ * DELETED_PATH@DELETED_REV is ancestrally related to the node at
+ * repository relpath COPYFROM_PATH@COPYFROM_REV, else set it to false.
+ *
+ * The check asks the repository for the location of
+ * DELETED_PATH@(DELETED_REV-1) at COPYFROM_REV and compares it with
+ * COPYFROM_PATH.  Authorization failures are treated as "not related".
+ *
+ * ### JAF: In practice this attempts to trace back, starting from
+ *     DELETED_PATH@(DELETED_REV-1). What if that does not exist?
+ */
+static svn_error_t *
+check_ancestry(svn_boolean_t *related,
+               const char *session_url,
+               const char *repos_root_url,
+               const char *deleted_path,
+               svn_revnum_t deleted_rev,
+               const char *copyfrom_path,
+               svn_revnum_t copyfrom_rev,
+               svn_client_ctx_t *ctx,
+               apr_pool_t *scratch_pool)
+{
+  apr_hash_t *locations;
+  const char *old_url;
+  const char *old_location;
+  const char *relpath;
+  svn_ra_session_t *ra_session2;
+  apr_array_header_t *location_revisions;
+
+  *related = FALSE;
+
+  location_revisions = apr_array_make(scratch_pool, 1, sizeof(svn_revnum_t));
+  APR_ARRAY_PUSH(location_revisions, svn_revnum_t) = copyfrom_rev;
+  old_url = svn_uri_canonicalize(apr_pstrcat(scratch_pool,
+                                             repos_root_url, "/",
+                                             deleted_path, NULL),
+                                 scratch_pool);
+  relpath = svn_uri_skip_ancestor(session_url, old_url, scratch_pool);
+  SVN_ERR(svn_client_open_ra_session2(&ra_session2, session_url, NULL,
+                                      ctx, scratch_pool, scratch_pool));
+  if (relpath == NULL)
+    {
+      svn_error_t *err;
+
+      /* The deleted path is outside of the baton's RA session URL.
+       * Try to open the new RA session to the repository root. */
+      SVN_ERR(svn_ra_reparent(ra_session2, repos_root_url, scratch_pool));
+      relpath = svn_uri_skip_ancestor(repos_root_url, old_url, scratch_pool);
+      if (relpath == NULL)
+        return SVN_NO_ERROR;
+      err = svn_ra_get_locations(ra_session2, &locations, relpath,
+                                 deleted_rev - 1, location_revisions,
+                                 scratch_pool);
+      if (err)
+        {
+          /* Not authorized to see the node's history: report "not
+             related" rather than failing the whole scan. */
+          if (err->apr_err == SVN_ERR_RA_NOT_AUTHORIZED ||
+              err->apr_err == SVN_ERR_RA_DAV_FORBIDDEN)
+            {
+              svn_error_clear(err);
+              return SVN_NO_ERROR;
+            }
+          else
+            return svn_error_trace(err);
+        }
+    }
+  else
+    SVN_ERR(svn_ra_get_locations(ra_session2, &locations, relpath,
+                                 deleted_rev - 1, location_revisions,
+                                 scratch_pool));
+
+  /* Compare the node's location at COPYFROM_REV (stripping any leading
+     '/') with COPYFROM_PATH. */
+  old_location = apr_hash_get(locations, &copyfrom_rev, sizeof(svn_revnum_t));
+  *related = (old_location &&
+              strcmp(old_location[0] == '/' ? old_location + 1 : old_location,
+                     copyfrom_path) == 0);
+
+  return SVN_NO_ERROR;
+}
+
+/* Detect moves in the revision described by LOG_ENTRY and record them
+ * in the baton's 'moves' and 'moves_by_target_path' hashes.
+ *
+ * A move is a deletion of a path together with a copy, in the same
+ * revision, whose copyfrom source is ancestrally related to the deleted
+ * path (see check_ancestry()).
+ *
+ * Implements svn_log_entry_receiver_t; BATON is a
+ * 'scan_moves_log_receiver_baton'.
+ */
+static svn_error_t *
+scan_moves_log_receiver(void *baton,
+                        svn_log_entry_t *log_entry,
+                        apr_pool_t *scratch_pool)
+{
+  apr_hash_index_t *hi;
+  apr_hash_t *copies;
+  apr_array_header_t *deleted_paths;
+  struct scan_moves_log_receiver_baton *b = baton;
+  /* Results live as long as the caller's 'moves' hash. */
+  apr_pool_t *result_pool = apr_hash_pool_get(b->moves);
+  apr_pool_t *iterpool;
+  int i;
+  const char *session_url;
+  const char *repos_root_url;
+  apr_array_header_t *moves;
+
+  if (b->ctx->notify_func2)
+    {
+#if 0
+      svn_wc_notify_t *notify;
+      notify = svn_wc_create_notify(b->anchor_abspath,
+                                    svn_wc_notify_moves_scan_log_in_progress,
+                                    scratch_pool);
+      notify->moves_scan_log_start_rev = b->start;
+      notify->moves_scan_log_end_rev = b->end;
+      notify->moves_scan_log_current_rev = log_entry->revision;
+      b->ctx->notify_func2(b->ctx->notify_baton2, notify, scratch_pool);
+#endif
+    }
+
+  /* Nothing to scan without changed-path information. */
+  if (log_entry->changed_paths2 == NULL)
+    return SVN_NO_ERROR;
+
+  copies = apr_hash_make(scratch_pool);
+  deleted_paths = apr_array_make(scratch_pool, 0, sizeof(const char *));
+
+  /* Scan for copied and deleted nodes in this revision. */
+  for (hi = apr_hash_first(scratch_pool, log_entry->changed_paths2);
+       hi; hi = apr_hash_next(hi))
+    {
+      const char *path = apr_hash_this_key(hi);
+      svn_log_changed_path2_t *data = apr_hash_this_val(hi);
+
+      /* 'A' (add) and 'R' (replace) entries with copyfrom info are
+         copies; group them by copyfrom path. */
+      if ((data->action == 'A' || data->action == 'R') && data->copyfrom_path)
+        {
+          struct copy_info *copy;
+          apr_array_header_t *copies_with_same_source_path;
+
+          SVN_ERR_ASSERT(path[0] == '/');
+
+          if (data->copyfrom_path[0] == '/')
+            data->copyfrom_path++;
+
+          copy = apr_palloc(scratch_pool, sizeof(*copy));
+          copy->copyto_path = path + 1; /* Strip leading '/' */
+          copy->copyfrom_path = data->copyfrom_path;
+          copy->copyfrom_rev = data->copyfrom_rev;
+          copies_with_same_source_path = apr_hash_get(copies,
+                                                      data->copyfrom_path,
+                                                      APR_HASH_KEY_STRING);
+          if (copies_with_same_source_path == NULL)
+            {
+              copies_with_same_source_path = apr_array_make(
+                                               result_pool, 1,
+                                               sizeof(struct copy_info *));
+              apr_hash_set(copies, copy->copyfrom_path, APR_HASH_KEY_STRING,
+                           copies_with_same_source_path);
+            }
+          APR_ARRAY_PUSH(copies_with_same_source_path,
+                         struct copy_info *) = copy;
+        }
+
+      /* 'D' (delete) and 'R' (replace) entries are deletions. */
+      if (data->action == 'D' || data->action == 'R')
+        {
+          const char *parent_path;
+
+          /* ### Is this true?  What does the API guarantee?  Is it
+             ### true that copyfrom_path is a relpath? */
+          SVN_ERR_ASSERT(path[0] == '/');
+
+          /* When a delete is within a copy the deleted path in the
+             changed_paths2 hash is the copied path, but for the purposes
+             of move detection we want the pre-copy path.
+
+             ### Not sure if this is the correct thing to do.  Yes, it
+             ### allows us to detect moves in copies/moves but will it
+             ### lead to false positives?  Does it matter that the
+             ### adjusted path may not have been committed?  Does it
+             ### matter that the adjusted path may be the same as
+             ### another committed path? */
+          parent_path = svn_dirent_dirname(path, scratch_pool);
+          while(strcmp(parent_path, "/"))
+            {
+              svn_log_changed_path2_t *data2
+                = apr_hash_get(log_entry->changed_paths2, parent_path,
+                               APR_HASH_KEY_STRING);
+
+              if (data2 && data2->action == 'A')
+                {
+                  /* Found the copied ancestor: rewrite the deleted path
+                     in terms of the copyfrom source. */
+                  const char *relpath = svn_dirent_skip_ancestor(parent_path,
+                                                                 path);
+                  path = svn_dirent_join_many(scratch_pool, "/",
+                                              data2->copyfrom_path, relpath,
+                                              NULL);
+                  break;
+                }
+              else
+                parent_path = svn_dirent_dirname(parent_path, scratch_pool);
+            }
+          APR_ARRAY_PUSH(deleted_paths, const char *) = path + 1;
+        }
+    }
+
+  /* If a node was deleted at one location and copied from the deleted
+   * location to a new location within the same revision, put the node
+   * on the moved-nodes list. */
+  SVN_ERR(svn_ra_get_session_url(b->ra_session, &session_url, scratch_pool));
+  SVN_ERR(svn_ra_get_repos_root2(b->ra_session, &repos_root_url, scratch_pool));
+  iterpool = svn_pool_create(scratch_pool);
+  for (i = 0; i < deleted_paths->nelts; i++)
+    {
+      const char *deleted_path;
+      apr_array_header_t *copies_with_same_source_path;
+      svn_repos_move_info_t *new_move;
+      svn_repos_move_info_t *prior_move;
+      svn_boolean_t related;
+      int j;
+
+      deleted_path = APR_ARRAY_IDX(deleted_paths, i, const char *);
+      copies_with_same_source_path = apr_hash_get(copies, deleted_path,
+                                                  APR_HASH_KEY_STRING);
+      if (copies_with_same_source_path == NULL)
+        continue;
+
+      svn_pool_clear(iterpool);
+
+      for (j = 0; j < copies_with_same_source_path->nelts; j++)
+        {
+          struct copy_info *copy;
+
+          copy = APR_ARRAY_IDX(copies_with_same_source_path, j,
+                               struct copy_info *);
+
+          /* We found a deleted node which matches the copyfrom path of
+           * a copied node.  Verify that the deleted node is an ancestor
+           * of the copied node.  Tracing back history of the deleted node
+           * from revision log_entry->revision-1 to the copyfrom-revision
+           * we must end up at the copyfrom-path. */
+          SVN_ERR(check_ancestry(&related, session_url, repos_root_url,
+                                 deleted_path, log_entry->revision,
+                                 copy->copyfrom_path,
+                                 copy->copyfrom_rev,
+                                 b->ctx, iterpool));
+          if (!related)
+            continue;
+
+          /* ### TODO:
+           * If the node was not copied from the most recent last-changed
+           * revision of the deleted node, this is not a move but a
+           * "copy from the past + delete". */
+
+          /* Remember details of this move. */
+          new_move = svn_repos_move_info_create(
+                       apr_pstrdup(result_pool, deleted_path),
+                       apr_pstrdup(result_pool, copy->copyto_path),
+                       log_entry->revision, copy->copyfrom_rev,
+                       NULL, NULL, result_pool);
+
+          /* Link together multiple moves of the same node. */
+          prior_move = apr_hash_get(b->moves_by_target_path,
+                                    new_move->moved_from_repos_relpath,
+                                    APR_HASH_KEY_STRING);
+          if (prior_move)
+            {
+              /* Tracing back history of the delete-half of the new move
+               * to the copyfrom-revision of the prior move we must end up
+               * at the delete-half of the prior move. */
+              SVN_ERR(check_ancestry(&related, session_url, repos_root_url,
+                                     new_move->moved_from_repos_relpath,
+                                     new_move->revision,
+                                     prior_move->moved_from_repos_relpath,
+                                     prior_move->copyfrom_rev,
+                                     b->ctx, iterpool));
+              if (related)
+                {
+                  prior_move->next = new_move;
+                  new_move->prev = prior_move;
+                }
+            }
+          apr_hash_set(b->moves_by_target_path,
+                       new_move->moved_to_repos_relpath,
+                       APR_HASH_KEY_STRING, new_move);
+
+          /* Add this move to the list of moves in this revision. */
+          moves = apr_hash_get(b->moves, &new_move->revision,
+                               sizeof(svn_revnum_t));
+          if (moves == NULL)
+            {
+              moves = apr_array_make(result_pool, 1,
+                                     sizeof(svn_repos_move_info_t *));
+              APR_ARRAY_PUSH(moves, svn_repos_move_info_t *) = new_move;
+              apr_hash_set(b->moves, &new_move->revision, sizeof(svn_revnum_t),
+                           moves);
+            }
+          else
+            APR_ARRAY_PUSH(moves, svn_repos_move_info_t *) = new_move;
+        }
+    }
+  svn_pool_destroy(iterpool);
+
+  return SVN_NO_ERROR;
+}
+
+/* Scan the repository log from START to END via RA_SESSION and, if
+ * MOVES is non-null, set *MOVES to a hash mapping revision numbers to
+ * arrays of svn_repos_move_info_t describing the moves detected in each
+ * revision (see scan_moves_log_receiver_baton for the exact structure).
+ *
+ * ANCHOR_ABSPATH is currently unused: the notification code that would
+ * use it is compiled out.
+ *
+ * Allocate *MOVES in RESULT_POOL.
+ */
+svn_error_t *
+svn_client__get_repos_moves(apr_hash_t **moves,
+                            const char *anchor_abspath,
+                            svn_ra_session_t *ra_session,
+                            svn_revnum_t start,
+                            svn_revnum_t end,
+                            svn_client_ctx_t *ctx,
+                            apr_pool_t *result_pool,
+                            apr_pool_t *scratch_pool)
+{
+  struct scan_moves_log_receiver_baton lrb;
+
+  /*lrb.anchor_abspath = anchor_abspath;*/
+  lrb.ctx = ctx;
+  lrb.moves = apr_hash_make(result_pool);
+  lrb.start = start;
+  lrb.end = end;
+  lrb.ra_session = ra_session;
+  lrb.moves_by_target_path = apr_hash_make(scratch_pool);
+
+  /* NOTE(review): the disabled blocks below reference 'b->', which does
+     not exist in this function; they would need to use 'lrb'/'ctx' if
+     re-enabled. */
+  if (ctx->notify_func2)
+    {
+#if 0
+      svn_wc_notify_t *notify;
+      notify = svn_wc_create_notify(b->anchor_abspath,
+                                    svn_wc_notify_moves_scan_log_start,
+                                    scratch_pool);
+      notify->moves_scan_log_start_rev = start;
+      notify->moves_scan_log_end_rev = end;
+      notify->moves_scan_log_current_rev = start;
+      ctx->notify_func2(b->ctx->notify_baton2, notify, scratch_pool);
+#endif
+    }
+
+  SVN_ERR(svn_ra_get_log2(ra_session, NULL, start, end, 0, TRUE, FALSE,
+                          FALSE, apr_array_make(scratch_pool, 0,
+                                                sizeof(const char *)),
+                          scan_moves_log_receiver, &lrb, scratch_pool));
+
+  if (ctx->notify_func2)
+    {
+#if 0
+      svn_wc_notify_t *notify;
+      notify = svn_wc_create_notify(b->anchor_abspath,
+                                    svn_wc_notify_moves_scan_log_done,
+                                    scratch_pool);
+      notify->moves_scan_log_start_rev = start;
+      notify->moves_scan_log_end_rev = end;
+      notify->moves_scan_log_current_rev = end;
+      b->ctx->notify_func2(b->ctx->notify_baton2, notify, scratch_pool);
+#endif
+    }
+
+  if (moves)
+    *moves = lrb.moves;
+
+  return SVN_NO_ERROR;
+}
+
diff --git a/tools/dev/svnmover/svnmover.c b/tools/dev/svnmover/svnmover.c
new file mode 100644
index 0000000..8bc8b65
--- /dev/null
+++ b/tools/dev/svnmover/svnmover.c
@@ -0,0 +1,4759 @@
+/*
+ * svnmover.c: Concept Demo for Move Tracking and Branching
+ *
+ * ====================================================================
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ * ====================================================================
+ */
+
+#include <stdio.h>
+#include <string.h>
+#include <assert.h>
+
+#include <apr_lib.h>
+
+#include "svn_private_config.h"
+#include "svn_hash.h"
+#include "svn_iter.h"
+#include "svn_client.h"
+#include "svn_cmdline.h"
+#include "svn_config.h"
+#include "svn_error.h"
+#include "svn_path.h"
+#include "svn_pools.h"
+#include "svn_props.h"
+#include "svn_string.h"
+#include "svn_subst.h"
+#include "svn_utf.h"
+#include "svn_version.h"
+#include "svnmover.h"
+
+#include "private/svn_cmdline_private.h"
+#include "private/svn_subr_private.h"
+#include "private/svn_branch_repos.h"
+#include "private/svn_branch_nested.h"
+#include "private/svn_branch_compat.h"
+#include "private/svn_ra_private.h"
+#include "private/svn_string_private.h"
+#include "private/svn_sorts_private.h"
+#include "private/svn_token.h"
+#include "private/svn_client_private.h"
+#include "private/svn_delta_private.h"
+
+#ifdef HAVE_LINENOISE
+#include "linenoise/linenoise.h"
+#endif
+
+/* Version compatibility check */
+static svn_error_t *
+check_lib_versions(void)
+{
+ static const svn_version_checklist_t checklist[] =
+ {
+ { "svn_client", svn_client_version },
+ { "svn_subr", svn_subr_version },
+ { "svn_ra", svn_ra_version },
+ { NULL, NULL }
+ };
+ SVN_VERSION_DEFINE(my_version);
+
+ return svn_ver_check_list2(&my_version, checklist, svn_ver_equal);
+}
+
/* When TRUE, suppress the verbose notifications (svnmover_notify_v). */
static svn_boolean_t quiet = FALSE;

/* UI mode: whether to display output in terms of paths or elements */
int the_ui_mode = UI_MODE_EIDS;

/* Accepted spellings (long and one-letter) for the UI mode option. */
static const svn_token_map_t ui_mode_map[]
  = { {"eids", UI_MODE_EIDS},
      {"e", UI_MODE_EIDS},
      {"paths", UI_MODE_PATHS},
      {"p", UI_MODE_PATHS},
      {"serial", UI_MODE_SERIAL},
      {"s", UI_MODE_SERIAL},
      {NULL, SVN_TOKEN_UNKNOWN} };

/* Is element EID the root element of BRANCH? */
#define is_branch_root_element(branch, eid) \
  (svn_branch__root_eid(branch) == (eid))

/* Is BRANCH1 the same branch as BRANCH2? Compare by full branch-ids; don't
   require identical branch objects. */
#define BRANCH_IS_SAME_BRANCH(branch1, branch2, scratch_pool) \
  (strcmp(svn_branch__get_id(branch1, scratch_pool), \
          svn_branch__get_id(branch2, scratch_pool)) == 0)

/* When TRUE, emit the ANSI colour escapes below around output text. */
static svn_boolean_t use_coloured_output = FALSE;

#ifndef WIN32

/* Some ANSI escape codes for controlling text colour in terminal output. */
#define TEXT_RESET      "\x1b[0m"
#define TEXT_FG_BLACK   "\x1b[30m"
#define TEXT_FG_RED     "\x1b[31m"
#define TEXT_FG_GREEN   "\x1b[32m"
#define TEXT_FG_YELLOW  "\x1b[33m"
#define TEXT_FG_BLUE    "\x1b[34m"
#define TEXT_FG_MAGENTA "\x1b[35m"
#define TEXT_FG_CYAN    "\x1b[36m"
#define TEXT_FG_WHITE   "\x1b[37m"
#define TEXT_BG_BLACK   "\x1b[40m"
#define TEXT_BG_RED     "\x1b[41m"
#define TEXT_BG_GREEN   "\x1b[42m"
#define TEXT_BG_YELLOW  "\x1b[43m"
#define TEXT_BG_BLUE    "\x1b[44m"
#define TEXT_BG_MAGENTA "\x1b[45m"
#define TEXT_BG_CYAN    "\x1b[46m"
#define TEXT_BG_WHITE   "\x1b[47m"

/* Emit colour escape TEXT_ATTR on stdout/stderr, if colour is enabled.
   Flushes so the escape takes effect before any following write. */
#define settext(text_attr) \
  do { \
    if (use_coloured_output) \
      { fputs(text_attr, stdout); fflush(stdout); } \
  } while (0)
#define settext_stderr(text_attr) \
  do { \
    if (use_coloured_output) \
      { fputs(text_attr, stderr); fflush(stderr); } \
  } while (0)

#else

/* To support colour on Windows, we could try:
 *
 * https://github.com/mattn/ansicolor-w32.c
 *
 * (I notice some obvious bugs in its puts/fputs implementations: the #defines
 * point to _fprintf_w32 instead of _fputs_w32, and puts() fails to append a
 * newline).
 */

/* No-op colour control on Windows. */
#define settext(code)
#define settext_stderr(code)

#endif
+
+__attribute__((format(printf, 1, 2)))
+void
+svnmover_notify(const char *fmt,
+ ...)
+{
+ va_list ap;
+
+ settext(TEXT_FG_GREEN);
+ va_start(ap, fmt);
+ vprintf(fmt, ap);
+ va_end(ap);
+ settext(TEXT_RESET);
+ printf("\n");
+}
+
+__attribute__((format(printf, 1, 2)))
+void
+svnmover_notify_v(const char *fmt,
+ ...)
+{
+ va_list ap;
+
+ if (! quiet)
+ {
+ settext(TEXT_FG_BLUE);
+ va_start(ap, fmt);
+ vprintf(fmt, ap);
+ va_end(ap);
+ settext(TEXT_RESET);
+ printf("\n");
+ }
+}
+
+#define SVN_CL__LOG_SEP_STRING \
+ "------------------------------------------------------------------------\n"
+
+/* ====================================================================== */
+
+/* Set the WC base revision of element EID to BASE_REV.
+ */
+static void
+svnmover_wc_set_base_rev(svnmover_wc_t *wc,
+ svn_branch__state_t *branch,
+ int eid,
+ svn_revnum_t base_rev)
+{
+ apr_hash_t *branch_base_revs = svn_hash_gets(wc->base_revs, branch->bid);
+ void *val = apr_pmemdup(wc->pool, &base_rev, sizeof(base_rev));
+
+ if (!branch_base_revs)
+ {
+ branch_base_revs = apr_hash_make(wc->pool);
+ svn_hash_sets(wc->base_revs, apr_pstrdup(wc->pool, branch->bid),
+ branch_base_revs);
+ }
+ svn_eid__hash_set(branch_base_revs, eid, val);
+}
+
+/* Get the WC base revision of element EID, or SVN_INVALID_REVNUM if
+ * element EID is not present in the WC base.
+ */
+static svn_revnum_t
+svnmover_wc_get_base_rev(svnmover_wc_t *wc,
+ svn_branch__state_t *branch,
+ int eid,
+ apr_pool_t *scratch_pool)
+{
+ apr_hash_t *branch_base_revs = svn_hash_gets(wc->base_revs, branch->bid);
+ svn_error_t *err;
+ svn_element__content_t *element;
+ svn_revnum_t *base_rev_p;
+
+ if (!branch_base_revs)
+ {
+ return SVN_INVALID_REVNUM;
+ }
+ err = svn_branch__state_get_element(branch, &element, eid, scratch_pool);
+ if (err || !element)
+ {
+ svn_error_clear(err);
+ return SVN_INVALID_REVNUM;
+ }
+
+ base_rev_p = svn_eid__hash_get(branch_base_revs, eid);
+ if (! base_rev_p)
+ return SVN_INVALID_REVNUM;
+ return *base_rev_p;
+}
+
+/* Set the WC base revision to BASE_REV for each element in WC base branch
+ * BRANCH, including nested branches.
+ */
+static svn_error_t *
+svnmover_wc_set_base_revs_r(svnmover_wc_t *wc,
+ svn_branch__state_t *branch,
+ svn_revnum_t base_rev,
+ apr_pool_t *scratch_pool)
+{
+ svn_element__tree_t *elements;
+ apr_hash_index_t *hi;
+
+ SVN_ERR(svn_branch__state_get_elements(branch, &elements, scratch_pool));
+ for (hi = apr_hash_first(scratch_pool, elements->e_map);
+ hi; hi = apr_hash_next(hi))
+ {
+ int eid = svn_eid__hash_this_key(hi);
+ svn_element__content_t *element;
+
+ svnmover_wc_set_base_rev(wc, branch, eid, base_rev);
+
+ /* recurse into nested branches */
+ SVN_ERR(svn_branch__state_get_element(branch, &element, eid,
+ scratch_pool));
+ if (element->payload->is_subbranch_root)
+ {
+ const char *subbranch_id
+ = svn_branch__id_nest(branch->bid, eid, scratch_pool);
+ svn_branch__state_t *subbranch
+ = svn_branch__txn_get_branch_by_id(branch->txn, subbranch_id,
+ scratch_pool);
+
+ SVN_ERR(svnmover_wc_set_base_revs_r(wc, subbranch,
+ base_rev, scratch_pool));
+ }
+ }
+
+ return SVN_NO_ERROR;
+}
+
+/* Set the WC base revision to BASE_REV for each element in WC base branch
+ * BRANCH, including nested branches.
+ */
+static svn_error_t *
+svnmover_wc_set_base_revs(svnmover_wc_t *wc,
+ svn_branch__state_t *branch,
+ svn_revnum_t base_rev,
+ apr_pool_t *scratch_pool)
+{
+ wc->base_revs = apr_hash_make(wc->pool);
+ SVN_ERR(svnmover_wc_set_base_revs_r(wc, branch, base_rev, scratch_pool));
+ return SVN_NO_ERROR;
+}
+
/* Get the lowest and highest base revision numbers in WC base branch
 * BRANCH, including nested branches.
 *
 * *BASE_REV_MIN / *BASE_REV_MAX must be initialized (to
 * SVN_INVALID_REVNUM) by the caller; they are updated in place as the
 * walk proceeds.
 *
 * NOTE(review): svnmover_wc_get_base_rev() returns SVN_INVALID_REVNUM
 * (-1) for an element with no recorded base; because of the `rev <
 * *base_rev_min` test below, such an element would pull the minimum down
 * to -1.  Confirm whether absent elements can occur here and whether
 * that is intended.
 */
static svn_error_t *
svnmover_wc_get_base_revs_r(svnmover_wc_t *wc,
                            svn_revnum_t *base_rev_min,
                            svn_revnum_t *base_rev_max,
                            svn_branch__state_t *branch,
                            apr_pool_t *scratch_pool)
{
  svn_element__tree_t *base_elements;
  apr_hash_index_t *hi;

  SVN_ERR(svn_branch__state_get_elements(branch, &base_elements,
                                         scratch_pool));

  for (hi = apr_hash_first(scratch_pool, base_elements->e_map);
       hi; hi = apr_hash_next(hi))
    {
      int eid = svn_eid__hash_this_key(hi);
      svn_revnum_t rev = svnmover_wc_get_base_rev(wc, branch, eid,
                                                  scratch_pool);
      svn_element__content_t *element;

      /* Widen the [min, max] range to include this element's base rev. */
      if (*base_rev_min == SVN_INVALID_REVNUM
          || rev < *base_rev_min)
        *base_rev_min = rev;
      if (*base_rev_max == SVN_INVALID_REVNUM
          || rev > *base_rev_max)
        *base_rev_max = rev;

      /* recurse into nested branches */
      SVN_ERR(svn_branch__state_get_element(branch, &element, eid,
                                            scratch_pool));
      if (element->payload->is_subbranch_root)
        {
          const char *subbranch_id
            = svn_branch__id_nest(branch->bid, eid, scratch_pool);
          svn_branch__state_t *subbranch
            = svn_branch__txn_get_branch_by_id(branch->txn, subbranch_id,
                                               scratch_pool);

          SVN_ERR(svnmover_wc_get_base_revs_r(wc, base_rev_min, base_rev_max,
                                              subbranch, scratch_pool));
        }
    }

  return SVN_NO_ERROR;
}
+
+/* Get the lowest and highest base revision numbers in WC.
+ */
+static svn_error_t *
+svnmover_wc_get_base_revs(svnmover_wc_t *wc,
+ svn_revnum_t *base_rev_min,
+ svn_revnum_t *base_rev_max,
+ apr_pool_t *scratch_pool)
+{
+ *base_rev_min = SVN_INVALID_REVNUM;
+ *base_rev_max = SVN_INVALID_REVNUM;
+ SVN_ERR(svnmover_wc_get_base_revs_r(wc, base_rev_min, base_rev_max,
+ wc->base->branch, scratch_pool));
+ return SVN_NO_ERROR;
+}
+
/* Update the WC to revision BASE_REVISION (SVN_INVALID_REVNUM means HEAD).
 *
 * Requires these fields in WC:
 *   head_revision
 *   repos_root_url
 *   ra_session
 *   pool
 *
 * Initializes these fields in WC:
 *   base_revision
 *   base_branch_id
 *   base_branch
 *   working_branch_id
 *   working_branch
 *   editor
 *
 * Assumes there are no changes in the WC: throws away the existing txn
 * and starts a new one.
 */
static svn_error_t *
wc_checkout(svnmover_wc_t *wc,
            svn_revnum_t base_revision,
            const char *base_branch_id,
            apr_pool_t *scratch_pool)
{
  const char *branch_info_dir = NULL;
  svn_branch__compat_fetch_func_t fetch_func;
  void *fetch_baton;
  svn_branch__txn_t *base_txn;

  /* Validate and store the new base revision number */
  if (! SVN_IS_VALID_REVNUM(base_revision))
    base_revision = wc->head_revision;
  else if (base_revision > wc->head_revision)
    return svn_error_createf(SVN_ERR_FS_NO_SUCH_REVISION, NULL,
                             _("No such revision %ld (HEAD is %ld)"),
                             base_revision, wc->head_revision);

  /* Choose whether to store branching info in a local dir or in revprops.
     (For now, just to exercise the options, we choose local files for
     RA-local and revprops for a remote repo.) */
  if (strncmp(wc->repos_root_url, "file://", 7) == 0)
    {
      const char *repos_dir;

      SVN_ERR(svn_uri_get_dirent_from_file_url(&repos_dir, wc->repos_root_url,
                                               scratch_pool));
      branch_info_dir = svn_dirent_join(repos_dir, "branch-info", scratch_pool);
    }

  /* Get a mutable transaction based on that rev. (This implementation
     re-reads all the move-tracking data from the repository.) */
  SVN_ERR(svn_ra_load_branching_state(&wc->edit_txn,
                                      &fetch_func, &fetch_baton,
                                      wc->ra_session, branch_info_dir,
                                      base_revision,
                                      wc->pool, scratch_pool));

  /* Wrap the txn so nested-branching operations are available. */
  wc->edit_txn = svn_branch__nested_txn_create(wc->edit_txn, wc->pool);

  /* Store the WC base state */
  base_txn = svn_branch__repos_get_base_revision_root(wc->edit_txn);
  wc->base = apr_pcalloc(wc->pool, sizeof(*wc->base));
  wc->base->revision = base_revision;
  wc->base->branch
    = svn_branch__txn_get_branch_by_id(base_txn, base_branch_id, scratch_pool);
  if (! wc->base->branch)
    return svn_error_createf(SVN_BRANCH__ERR, NULL,
                             "Cannot check out WC: branch %s not found in r%ld",
                             base_branch_id, base_revision);
  SVN_ERR(svnmover_wc_set_base_revs(wc, wc->base->branch,
                                    base_revision, scratch_pool));

  /* The working state starts out identical to the base state; its
     revision is invalid until a commit assigns one. */
  wc->working = apr_pcalloc(wc->pool, sizeof(*wc->working));
  wc->working->revision = SVN_INVALID_REVNUM;
  wc->working->branch
    = svn_branch__txn_get_branch_by_id(wc->edit_txn, base_branch_id,
                                       scratch_pool);
  SVN_ERR_ASSERT(wc->working->branch);

  return SVN_NO_ERROR;
}
+
/* Create a simulated WC, in memory.
 *
 * Initializes these fields in WC:
 *   head_revision
 *   repos_root_url
 *   ra_session
 *   made_changes
 *   ctx
 *   pool
 *
 * BASE_REVISION is the revision to work on, or SVN_INVALID_REVNUM for HEAD.
 * The new WC is allocated in a subpool of RESULT_POOL and returned in
 * *WC_P.
 */
static svn_error_t *
wc_create(svnmover_wc_t **wc_p,
          const char *anchor_url,
          svn_revnum_t base_revision,
          const char *base_branch_id,
          svn_client_ctx_t *ctx,
          apr_pool_t *result_pool,
          apr_pool_t *scratch_pool)
{
  /* The WC owns its own subpool so it can be destroyed independently. */
  apr_pool_t *wc_pool = svn_pool_create(result_pool);
  svnmover_wc_t *wc = apr_pcalloc(wc_pool, sizeof(*wc));

  wc->pool = wc_pool;
  wc->ctx = ctx;

  SVN_ERR(svn_client_open_ra_session2(&wc->ra_session, anchor_url,
                                      NULL /* wri_abspath */, ctx,
                                      wc_pool, scratch_pool));

  SVN_ERR(svn_ra_get_repos_root2(wc->ra_session, &wc->repos_root_url,
                                 result_pool));
  SVN_ERR(svn_ra_get_latest_revnum(wc->ra_session, &wc->head_revision,
                                   scratch_pool));
  /* Work relative to the repository root from here on. */
  SVN_ERR(svn_ra_reparent(wc->ra_session, wc->repos_root_url, scratch_pool));

  SVN_ERR(wc_checkout(wc, base_revision, base_branch_id, scratch_pool));
  *wc_p = wc;
  return SVN_NO_ERROR;
}
+
+svn_error_t *
+svnmover_element_differences(apr_hash_t **diff_p,
+ const svn_element__tree_t *left,
+ const svn_element__tree_t *right,
+ apr_hash_t *elements,
+ apr_pool_t *result_pool,
+ apr_pool_t *scratch_pool)
+{
+ apr_hash_t *diff = apr_hash_make(result_pool);
+ apr_hash_index_t *hi;
+
+ if (! left)
+ left = svn_element__tree_create(NULL, 0 /*root_eid*/, scratch_pool);
+ if (! right)
+ right = svn_element__tree_create(NULL, 0 /*root_eid*/, scratch_pool);
+
+ /*SVN_DBG(("element_differences(b%s r%ld, b%s r%ld, e%d)",
+ svn_branch__get_id(left->branch, scratch_pool), left->rev,
+ svn_branch__get_id(right->branch, scratch_pool), right->rev,
+ right->eid));*/
+
+ if (!elements)
+ elements = hash_overlay(left->e_map, right->e_map);
+
+ for (hi = apr_hash_first(scratch_pool, elements);
+ hi; hi = apr_hash_next(hi))
+ {
+ int e = svn_eid__hash_this_key(hi);
+ svn_element__content_t *element_left
+ = svn_element__tree_get(left, e);
+ svn_element__content_t *element_right
+ = svn_element__tree_get(right, e);
+
+ if (! svn_element__content_equal(element_left, element_right,
+ scratch_pool))
+ {
+ svn_element__content_t **contents
+ = apr_palloc(result_pool, 2 * sizeof(void *));
+
+ contents[0] = element_left;
+ contents[1] = element_right;
+ svn_eid__hash_set(diff, e, contents);
+ }
+ }
+
+ *diff_p = diff;
+ return SVN_NO_ERROR;
+}
+
+/* */
+static const char *
+rev_bid_str(const svn_branch__rev_bid_t *rev_bid,
+ apr_pool_t *result_pool)
+{
+ if (!rev_bid)
+ return "<nil>";
+ return apr_psprintf(result_pool, "r%ld.%s", rev_bid->rev, rev_bid->bid);
+}
+
+/* */
+static const char *
+list_parents(svn_branch__history_t *history,
+ apr_pool_t *result_pool)
+{
+ const char *result = "";
+ apr_hash_index_t *hi;
+
+ for (hi = apr_hash_first(result_pool, history->parents);
+ hi; hi = apr_hash_next(hi))
+ {
+ svn_branch__rev_bid_t *parent = apr_hash_this_val(hi);
+ const char *parent_str = rev_bid_str(parent, result_pool);
+
+ result = apr_psprintf(result_pool, "%s%s%s",
+ result, result[0] ? ", " : "", parent_str);
+ }
+ return result;
+}
+
+/* Return a string representation of HISTORY.
+ */
+static const char *
+history_str(svn_branch__history_t *history,
+ apr_pool_t *result_pool)
+{
+ const char *result
+ = list_parents(history, result_pool);
+
+ return apr_psprintf(result_pool, "parents={%s}", result);
+}
+
+/*
+ */
+static svn_error_t *
+svn_branch__history_add_parent(svn_branch__history_t *history,
+ svn_revnum_t rev,
+ const char *branch_id,
+ apr_pool_t *scratch_pool)
+{
+ apr_pool_t *pool = apr_hash_pool_get(history->parents);
+ svn_branch__rev_bid_t *new_parent;
+
+ new_parent = svn_branch__rev_bid_create(rev, branch_id, pool);
+ svn_hash_sets(history->parents, apr_pstrdup(pool, branch_id), new_parent);
+ return SVN_NO_ERROR;
+}
+
+/* Set *DIFFERENCE_P to some sort of indication of the difference between
+ * HISTORY1 and HISTORY2, or to null if there is no difference.
+ *
+ * Inputs may be null.
+ */
+static svn_error_t *
+history_diff(const char **difference_p,
+ svn_branch__history_t *history1,
+ svn_branch__history_t *history2,
+ apr_pool_t *result_pool,
+ apr_pool_t *scratch_pool)
+{
+ apr_hash_t *combined;
+ apr_hash_index_t *hi;
+ svn_boolean_t different = FALSE;
+
+ if (! history1)
+ history1 = svn_branch__history_create_empty(scratch_pool);
+ if (! history2)
+ history2 = svn_branch__history_create_empty(scratch_pool);
+ combined = hash_overlay(history1->parents,
+ history2->parents);
+
+ for (hi = apr_hash_first(scratch_pool, combined);
+ hi; hi = apr_hash_next(hi))
+ {
+ const char *bid = apr_hash_this_key(hi);
+ svn_branch__rev_bid_t *parent1 = svn_hash_gets(history1->parents, bid);
+ svn_branch__rev_bid_t *parent2 = svn_hash_gets(history2->parents, bid);
+
+ if (!(parent1 && parent2
+ && svn_branch__rev_bid_equal(parent1, parent2)))
+ {
+ different = TRUE;
+ break;
+ }
+ }
+ if (different)
+ {
+ *difference_p = apr_psprintf(result_pool, "%s -> %s",
+ history_str(history1, scratch_pool),
+ history_str(history2, scratch_pool));
+ }
+ else
+ {
+ *difference_p = NULL;
+ }
+ return SVN_NO_ERROR;
+}
+
/* Set *IS_CHANGED to true if EDIT_TXN differs from its base txn, else to
 * false.
 *
 * Notice only a difference in content: branches deleted or added, or branch
 * contents different. Ignore any differences in branch history metadata.
 *
 * ### At least we must ignore the "this branch" parent changing from
 *     old-revision to new-revision. However we should probably notice
 *     if a merge parent is added (which means we want to make a commit
 *     recording this merge, even if no content changed), and perhaps
 *     other cases.
 */
static svn_error_t *
txn_is_changed(svn_branch__txn_t *edit_txn,
               svn_boolean_t *is_changed,
               apr_pool_t *scratch_pool)
{
  int i;
  svn_branch__txn_t *base_txn
    = svn_branch__repos_get_base_revision_root(edit_txn);
  apr_array_header_t *edit_branches
    = svn_branch__txn_get_branches(edit_txn, scratch_pool);
  apr_array_header_t *base_branches
    = svn_branch__txn_get_branches(base_txn, scratch_pool);

  *is_changed = FALSE;

  /* If any previous branch is now missing, that's a change. */
  for (i = 0; i < base_branches->nelts; i++)
    {
      svn_branch__state_t *base_branch = APR_ARRAY_IDX(base_branches, i, void *);
      svn_branch__state_t *edit_branch
        = svn_branch__txn_get_branch_by_id(edit_txn, base_branch->bid,
                                           scratch_pool);

      if (! edit_branch)
        {
          *is_changed = TRUE;
          return SVN_NO_ERROR;
        }
    }

  /* If any current branch is new or changed, that's a change. */
  for (i = 0; i < edit_branches->nelts; i++)
    {
      svn_branch__state_t *edit_branch = APR_ARRAY_IDX(edit_branches, i, void *);
      svn_branch__state_t *base_branch
        = svn_branch__txn_get_branch_by_id(base_txn, edit_branch->bid,
                                           scratch_pool);
      svn_element__tree_t *edit_branch_elements, *base_branch_elements;
      apr_hash_t *diff;

      /* A branch with no counterpart in the base txn is newly added. */
      if (! base_branch)
        {
          *is_changed = TRUE;
          return SVN_NO_ERROR;
        }

#if 0
      /* Compare histories */
      /* ### No, don't. Ignore any differences in branch history metadata. */
      {
        svn_branch__history_t *edit_branch_history;
        svn_branch__history_t *base_branch_history;
        const char *history_difference;

        SVN_ERR(svn_branch__state_get_history(edit_branch, &edit_branch_history,
                                              scratch_pool));
        SVN_ERR(svn_branch__state_get_history(base_branch, &base_branch_history,
                                              scratch_pool));
        SVN_ERR(history_diff(&history_difference,
                             edit_branch_history,
                             base_branch_history,
                             scratch_pool, scratch_pool));
        if (history_difference)
          {
            *is_changed = TRUE;
            return SVN_NO_ERROR;
          }
      }
#endif

      /* Compare elements */
      SVN_ERR(svn_branch__state_get_elements(edit_branch, &edit_branch_elements,
                                             scratch_pool));
      SVN_ERR(svn_branch__state_get_elements(base_branch, &base_branch_elements,
                                             scratch_pool));
      SVN_ERR(svnmover_element_differences(&diff,
                                           edit_branch_elements,
                                           base_branch_elements,
                                           NULL /*all elements*/,
                                           scratch_pool, scratch_pool));
      /* Any per-element difference at all means the txn is changed. */
      if (apr_hash_count(diff))
        {
          *is_changed = TRUE;
          return SVN_NO_ERROR;
        }
    }

  return SVN_NO_ERROR;
}
+
+/* Replay the whole-element changes between LEFT_BRANCH and RIGHT_BRANCH
+ * into EDIT_BRANCH.
+ *
+ * Replaying means, for each element E that is changed (added, modified
+ * or deleted) between left and right branches, we set element E in
+ * EDIT_BRANCH to whole value of E in RIGHT_BRANCH. This is not like
+ * merging: each change resets an element's whole value.
+ *
+ * ELEMENTS_TO_DIFF (eid -> [anything]) says which elements to diff; if
+ * null, diff all elements in the union of left & right branches.
+ *
+ * LEFT_BRANCH and/or RIGHT_BRANCH may be null which means the equivalent
+ * of an empty branch.
+ *
+ * Non-recursive: single branch only.
+ */
+static svn_error_t *
+branch_elements_replay(svn_branch__state_t *edit_branch,
+ const svn_branch__state_t *left_branch,
+ const svn_branch__state_t *right_branch,
+ apr_hash_t *elements_to_diff,
+ apr_pool_t *scratch_pool)
+{
+ svn_element__tree_t *s_left = NULL, *s_right = NULL;
+ apr_hash_t *diff_left_right;
+ apr_hash_index_t *hi;
+
+ if (left_branch)
+ SVN_ERR(svn_branch__state_get_elements(left_branch, &s_left,
+ scratch_pool));
+ if (right_branch)
+ SVN_ERR(svn_branch__state_get_elements(right_branch, &s_right,
+ scratch_pool));
+ SVN_ERR(svnmover_element_differences(&diff_left_right,
+ s_left, s_right,
+ elements_to_diff,
+ scratch_pool, scratch_pool));
+
+ /* Go through the per-element differences. */
+ for (hi = apr_hash_first(scratch_pool, diff_left_right);
+ hi; hi = apr_hash_next(hi))
+ {
+ int eid = svn_eid__hash_this_key(hi);
+ svn_element__content_t **e_pair = apr_hash_this_val(hi);
+ svn_element__content_t *e0 = e_pair[0], *e1 = e_pair[1];
+
+ SVN_ERR_ASSERT(!e0
+ || svn_element__payload_invariants(e0->payload));
+ SVN_ERR_ASSERT(!e1
+ || svn_element__payload_invariants(e1->payload));
+ SVN_ERR(svn_branch__state_set_element(edit_branch, eid,
+ e1, scratch_pool));
+ }
+
+ return SVN_NO_ERROR;
+}
+
+/* */
+static svn_error_t *
+get_union_of_subbranches(apr_hash_t **all_subbranches_p,
+ svn_branch__state_t *left_branch,
+ svn_branch__state_t *right_branch,
+ apr_pool_t *result_pool)
+{
+ apr_hash_t *all_subbranches;
+ svn_branch__subtree_t *s_left = NULL;
+ svn_branch__subtree_t *s_right = NULL;
+
+ if (left_branch)
+ SVN_ERR(svn_branch__get_subtree(left_branch, &s_left,
+ svn_branch__root_eid(left_branch),
+ result_pool));
+ if (right_branch)
+ SVN_ERR(svn_branch__get_subtree(right_branch, &s_right,
+ svn_branch__root_eid(right_branch),
+ result_pool));
+ all_subbranches
+ = (s_left && s_right) ? hash_overlay(s_left->subbranches,
+ s_right->subbranches)
+ : s_left ? s_left->subbranches
+ : s_right ? s_right->subbranches
+ : apr_hash_make(result_pool);
+
+ *all_subbranches_p = all_subbranches;
+ return SVN_NO_ERROR;
+}
+
/* Replay differences between S_LEFT and S_RIGHT into EDITOR:EDIT_BRANCH.
 *
 * S_LEFT or S_RIGHT (but not both) may be null meaning an empty set.
 *
 * Recurse into subbranches.
 */
static svn_error_t *
svn_branch__replay(svn_branch__txn_t *edit_txn,
                   svn_branch__state_t *edit_branch,
                   svn_branch__state_t *left_branch,
                   svn_branch__state_t *right_branch,
                   apr_pool_t *scratch_pool)
{
  /* When both sides are present they must describe the same branch
     (same root eid); otherwise exactly one side must be present. */
  assert((left_branch && right_branch)
         ? (svn_branch__root_eid(left_branch) == svn_branch__root_eid(right_branch))
         : (left_branch || right_branch));

  if (right_branch)
    {
      /* Replay this branch */
      apr_hash_t *elements_to_diff = NULL;  /*means the union of left & right*/

      SVN_ERR(branch_elements_replay(edit_branch, left_branch, right_branch,
                                     elements_to_diff, scratch_pool));
    }
  else
    {
      /* deleted branch LEFT */
      /* nothing to do -- it will go away because we deleted the outer-branch
         element where it was attached */
    }

  /* Replay any change in history */
  /* ### Actually, here we just set the output history to the right-hand-side
         history if that differs from left-hand-side.
         This doesn't seem right, in general. It's OK if we're just copying
         a txn into a fresh txn, as for example we do during commit. */
  {
    svn_branch__history_t *left_history = NULL;
    svn_branch__history_t *right_history = NULL;
    const char *history_difference;

    if (left_branch)
      SVN_ERR(svn_branch__state_get_history(left_branch, &left_history,
                                            scratch_pool));
    if (right_branch)
      SVN_ERR(svn_branch__state_get_history(right_branch, &right_history,
                                            scratch_pool));
    SVN_ERR(history_diff(&history_difference, left_history, right_history,
                         scratch_pool, scratch_pool));
    if (history_difference)
      {
        SVN_ERR(svn_branch__state_set_history(edit_branch, right_history,
                                              scratch_pool));
      }
  }

  /* Replay its subbranches, recursively.
     (If we're deleting the current branch, we don't also need to
     explicitly delete its subbranches... do we?) */
  if (right_branch)
    {
      apr_hash_t *all_subbranches;
      apr_hash_index_t *hi;

      SVN_ERR(get_union_of_subbranches(&all_subbranches,
                                       left_branch, right_branch, scratch_pool));
      for (hi = apr_hash_first(scratch_pool, all_subbranches);
           hi; hi = apr_hash_next(hi))
        {
          int this_eid = svn_eid__hash_this_key(hi);
          svn_branch__state_t *left_subbranch = NULL;
          svn_branch__state_t *right_subbranch = NULL;
          svn_branch__state_t *edit_subbranch = NULL;

          if (left_branch)
            SVN_ERR(svn_branch__get_subbranch_at_eid(
                      left_branch, &left_subbranch, this_eid, scratch_pool));
          if (right_branch)
            SVN_ERR(svn_branch__get_subbranch_at_eid(
                      right_branch, &right_subbranch, this_eid, scratch_pool));
          /* If the subbranch is to be edited or added, first look up the
             corresponding edit subbranch, or, if not found, create one. */
          if (right_subbranch)
            {
              const char *new_branch_id
                = svn_branch__id_nest(edit_branch->bid, this_eid, scratch_pool);

              SVN_ERR(svn_branch__txn_open_branch(edit_txn, &edit_subbranch,
                                                  new_branch_id,
                                                  svn_branch__root_eid(right_subbranch),
                                                  NULL /*tree_ref*/,
                                                  scratch_pool, scratch_pool));
            }

          /* recurse */
          if (edit_subbranch)
            {
              SVN_ERR(svn_branch__replay(edit_txn, edit_subbranch,
                                         left_subbranch, right_subbranch,
                                         scratch_pool));
            }
        }
    }

  return SVN_NO_ERROR;
}
+
+/* Replay differences between LEFT_BRANCH and RIGHT_BRANCH into
+ * EDIT_ROOT_BRANCH.
+ * (Recurse into subbranches.)
+ */
+static svn_error_t *
+replay(svn_branch__txn_t *edit_txn,
+ svn_branch__state_t *edit_root_branch,
+ svn_branch__state_t *left_branch,
+ svn_branch__state_t *right_branch,
+ apr_pool_t *scratch_pool)
+{
+ SVN_ERR_ASSERT(left_branch || right_branch);
+
+ SVN_ERR(svn_branch__replay(edit_txn, edit_root_branch,
+ left_branch, right_branch, scratch_pool));
+ return SVN_NO_ERROR;
+}
+
+static svn_error_t *
+commit_callback(const svn_commit_info_t *commit_info,
+ void *baton,
+ apr_pool_t *pool);
+
/* Baton for commit_callback(). */
typedef struct commit_callback_baton_t
{
  /* The transaction being committed. */
  svn_branch__txn_t *edit_txn;
  /* Branch ids involved in the commit.  NOTE(review): presumably the WC
     base branch and the branch committed to -- confirm against callers. */
  const char *wc_base_branch_id;
  const char *wc_commit_branch_id;

  /* just-committed revision */
  svn_revnum_t revision;
} commit_callback_baton_t;
+
+static svn_error_t *
+display_diff_of_commit(const commit_callback_baton_t *ccbb,
+ apr_pool_t *scratch_pool);
+
+static svn_error_t *
+do_topbranch(svn_branch__state_t **new_branch_p,
+ svn_branch__txn_t *txn,
+ svn_branch__rev_bid_eid_t *from,
+ apr_pool_t *result_pool,
+ apr_pool_t *scratch_pool);
+
+/* Allocate the same number of new EIDs in NEW_TXN as are already
+ * allocated in OLD_TXN.
+ */
+static svn_error_t *
+allocate_eids(svn_branch__txn_t *new_txn,
+ const svn_branch__txn_t *old_txn,
+ apr_pool_t *scratch_pool)
+{
+ int num_new_eids;
+ int i;
+
+ SVN_ERR(svn_branch__txn_get_num_new_eids(old_txn, &num_new_eids,
+ scratch_pool));
+ for (i = 0; i < num_new_eids; i++)
+ {
+ SVN_ERR(svn_branch__txn_new_eid(new_txn, NULL, scratch_pool));
+ }
+
+ return SVN_NO_ERROR;
+}
+
/* Update the EIDs, given that a commit has translated all new EIDs
 * (negative numbers) to regular EIDs (positive numbers).
 *
 * ### TODO: This will need to take and use a new-EID-translation rule
 *     that must be returned by the commit, as we must not guess (as we
 *     presently do) what translation the server performed. This guess
 *     will fail once the server does rebasing on commit.
 */
static svn_error_t *
update_wc_eids(svnmover_wc_t *wc,
               apr_pool_t *scratch_pool)
{
  /* Mirror the number of new EIDs from the working txn into the base
     txn, then finalize (renumber) the new EIDs in both. */
  SVN_ERR(allocate_eids(wc->base->branch->txn, wc->working->branch->txn,
                        scratch_pool));
  SVN_ERR(svn_branch__txn_finalize_eids(wc->base->branch->txn, scratch_pool));
  SVN_ERR(svn_branch__txn_finalize_eids(wc->working->branch->txn, scratch_pool));
  return SVN_NO_ERROR;
}
+
/* Update the WC base value of each committed element to match the
 * corresponding WC working element value.
 * Update the WC base revision for each committed element to NEW_REV.
 *
 * The committed elements are determined by diffing base against working.
 * ### TODO: When we allow committing a subset of the WC, we'll need to
 *     pass in a list of the committed elements.
 *
 * BASE_BRANCH and/or WORK_BRANCH may be null.
 */
static svn_error_t *
update_wc_base_r(svnmover_wc_t *wc,
                 svn_branch__state_t *base_branch,
                 svn_branch__state_t *work_branch,
                 svn_revnum_t new_rev,
                 apr_pool_t *scratch_pool)
{
  svn_element__tree_t *base_elements = NULL, *working_elements = NULL;
  apr_hash_t *committed_elements;
  apr_hash_index_t *hi;

  if (base_branch)
    SVN_ERR(svn_branch__state_get_elements(base_branch, &base_elements,
                                           scratch_pool));
  if (work_branch)
    SVN_ERR(svn_branch__state_get_elements(work_branch, &working_elements,
                                           scratch_pool));
  /* Elements that differ between base and working are the ones that
     were just committed. */
  SVN_ERR(svnmover_element_differences(&committed_elements,
                                       base_elements, working_elements,
                                       NULL /*all elements*/,
                                       scratch_pool, scratch_pool));

  for (hi = apr_hash_first(scratch_pool, committed_elements);
       hi; hi = apr_hash_next(hi))
    {
      int eid = svn_eid__hash_this_key(hi);
      svn_element__content_t *content = NULL;

      /* Copy the working value (null if deleted) into the base, and
         stamp the element with the committed revision. */
      if (work_branch)
        SVN_ERR(svn_branch__state_get_element(work_branch, &content,
                                              eid, scratch_pool));
      SVN_ERR(svn_branch__state_set_element(base_branch, eid,
                                            content, scratch_pool));
      svnmover_wc_set_base_rev(wc, base_branch, eid, new_rev);

      /* recurse into nested branches that exist in working */
      if (content && content->payload->is_subbranch_root)
        {
          svn_branch__state_t *base_subbranch = NULL;
          svn_branch__state_t *work_subbranch = NULL;

          if (base_branch)
            {
              base_subbranch
                = svn_branch__txn_get_branch_by_id(
                    base_branch->txn,
                    svn_branch__id_nest(base_branch->bid, eid, scratch_pool),
                    scratch_pool);
            }
          if (work_branch)
            {
              work_subbranch
                = svn_branch__txn_get_branch_by_id(
                    work_branch->txn,
                    svn_branch__id_nest(work_branch->bid, eid, scratch_pool),
                    scratch_pool);
            }
          /* A subbranch newly created in working must be created in the
             base txn too, with the same root eid and history. */
          if (work_subbranch && !base_subbranch)
            {
              const char *new_branch_id
                = svn_branch__id_nest(base_branch->bid, eid, scratch_pool);
              svn_branch__history_t *history;

              SVN_ERR(svn_branch__txn_open_branch(base_branch->txn,
                                                  &base_subbranch,
                                                  new_branch_id,
                                                  svn_branch__root_eid(work_subbranch),
                                                  NULL /*tree_ref*/,
                                                  scratch_pool, scratch_pool));
              SVN_ERR(svn_branch__state_get_history(
                        work_subbranch, &history, scratch_pool));
              SVN_ERR(svn_branch__state_set_history(
                        base_subbranch, history, scratch_pool));
            }
          SVN_ERR(update_wc_base_r(wc, base_subbranch, work_subbranch,
                                   new_rev, scratch_pool));
        }
    }

  return SVN_NO_ERROR;
}
+
+/* Update the WC base value of each committed element to match the
+ * corresponding WC working element value.
+ * Update the WC base revision for each committed element to NEW_REV.
+ *
+ * The committed elements are determined by diffing base against working.
+ * ### TODO: When we allow committing a subset of the WC, we'll need to
+ * pass in a list of the committed elements.
+ *
+ * ### This should be equivalent to 'replay(base, base, working)'. Use that
+ * instead.
+ */
+static svn_error_t *
+update_wc_base(svnmover_wc_t *wc,
+ svn_revnum_t new_rev,
+ apr_pool_t *scratch_pool)
+{
+ /* Entry point: recurse from the WC's root base/working branch pair.
+    The recursion itself (diffing and per-element base update) is in
+    update_wc_base_r(). */
+ svn_branch__state_t *base_branch = wc->base->branch;
+ svn_branch__state_t *work_branch = wc->working->branch;
+ SVN_ERR(update_wc_base_r(wc, base_branch, work_branch,
+ new_rev, scratch_pool));
+ return SVN_NO_ERROR;
+}
+
+/* Commit the changes from WC into the repository.
+ *
+ * Open a new commit txn to the repo. Replay the changes from WC into it.
+ * Update the WC base for the committed elements.
+ *
+ * Set WC->head_revision and *NEW_REV_P to the committed revision number.
+ *
+ * If there are no changes to commit, set *NEW_REV_P to SVN_INVALID_REVNUM
+ * and do not make a commit and do not change WC->head_revision.
+ *
+ * NEW_REV_P may be null if not wanted.
+ */
+static svn_error_t *
+wc_commit(svn_revnum_t *new_rev_p,
+ svnmover_wc_t *wc,
+ apr_hash_t *revprops,
+ apr_pool_t *scratch_pool)
+{
+ const char *branch_info_dir = NULL;
+ svn_branch__txn_t *commit_txn;
+ commit_callback_baton_t ccbb;
+ svn_boolean_t change_detected;
+ const char *edit_root_branch_id;
+ svn_branch__state_t *edit_root_branch;
+
+ /* A WC with no changes produces no commit: report SVN_INVALID_REVNUM
+    and leave WC->head_revision untouched. */
+ SVN_ERR(txn_is_changed(wc->working->branch->txn, &change_detected,
+ scratch_pool));
+ if (! change_detected)
+ {
+ wc->list_of_commands = NULL;
+ if (new_rev_p)
+ *new_rev_p = SVN_INVALID_REVNUM;
+ return SVN_NO_ERROR;
+ }
+
+ /* If no log msg provided, use the list of commands */
+ if (! svn_hash_gets(revprops, SVN_PROP_REVISION_LOG) && wc->list_of_commands)
+ {
+ /* Avoid modifying the passed-in revprops hash */
+ revprops = apr_hash_copy(scratch_pool, revprops);
+
+ svn_hash_sets(revprops, SVN_PROP_REVISION_LOG,
+ svn_string_create(wc->list_of_commands, scratch_pool));
+ }
+
+ /* Choose whether to store branching info in a local dir or in revprops.
+ (For now, just to exercise the options, we choose local files for
+ RA-local and revprops for a remote repo.) */
+ if (strncmp(wc->repos_root_url, "file://", 7) == 0)
+ {
+ const char *repos_dir;
+
+ SVN_ERR(svn_uri_get_dirent_from_file_url(&repos_dir, wc->repos_root_url,
+ scratch_pool));
+ branch_info_dir = svn_dirent_join(repos_dir, "branch-info", scratch_pool);
+ }
+
+ /* Start a new editor for the commit.
+    Note: CCBB is handed to the editor here but its fields are only
+    assigned further below; presumably the commit callback does not fire
+    until svn_branch__txn_complete() -- at which point CCBB.revision is
+    filled in for us to read.  TODO(review): confirm that ordering. */
+ SVN_ERR(svn_ra_get_commit_txn(wc->ra_session,
+ &commit_txn,
+ revprops,
+ commit_callback, &ccbb,
+ NULL /*lock_tokens*/, FALSE /*keep_locks*/,
+ branch_info_dir,
+ scratch_pool));
+ /*SVN_ERR(svn_branch__txn_get_debug(&wc->edit_txn, wc->edit_txn, scratch_pool));*/
+
+ edit_root_branch_id = wc->working->branch->bid;
+ edit_root_branch = svn_branch__txn_get_branch_by_id(
+ commit_txn, wc->working->branch->bid, scratch_pool);
+
+ /* We might be creating a new top-level branch in this commit. That is the
+ only case in which the working branch will not be found in EDIT_TXN.
+ (Creating any other branch can only be done inside a checkout of a
+ parent branch.) So, maybe create a new top-level branch. */
+ if (! edit_root_branch)
+ {
+ /* Create a new top-level branch in the edited state. (It will have
+ an independent new top-level branch number.) */
+ svn_branch__rev_bid_eid_t *from
+ = svn_branch__rev_bid_eid_create(wc->base->revision,
+ wc->base->branch->bid,
+ svn_branch__root_eid(wc->base->branch),
+ scratch_pool);
+
+ SVN_ERR(do_topbranch(&edit_root_branch, commit_txn,
+ from, scratch_pool, scratch_pool));
+ edit_root_branch_id = edit_root_branch->bid;
+ }
+ /* Allocate all the new eids we'll need in this new txn */
+ SVN_ERR(allocate_eids(commit_txn, wc->working->branch->txn, scratch_pool));
+ /* Replay the base->working changes into the commit txn. */
+ SVN_ERR(replay(commit_txn, edit_root_branch,
+ wc->base->branch,
+ wc->working->branch,
+ scratch_pool));
+
+ ccbb.edit_txn = commit_txn;
+ ccbb.wc_base_branch_id = wc->base->branch->bid;
+ ccbb.wc_commit_branch_id = edit_root_branch_id;
+
+ /* Complete the commit, then bring the WC base up to date with it. */
+ SVN_ERR(svn_branch__txn_complete(commit_txn, scratch_pool));
+ SVN_ERR(update_wc_eids(wc, scratch_pool));
+ SVN_ERR(update_wc_base(wc, ccbb.revision, scratch_pool));
+ SVN_ERR(display_diff_of_commit(&ccbb, scratch_pool));
+
+ wc->head_revision = ccbb.revision;
+ if (new_rev_p)
+ *new_rev_p = ccbb.revision;
+
+ wc->list_of_commands = NULL;
+
+ return SVN_NO_ERROR;
+}
+
+/* Codes identifying each subcommand accepted by svnmover; indexed by the
+   ACTION_DEFN table below. */
+typedef enum action_code_t {
+ ACTION_INFO_WC,
+ ACTION_INFO,
+ ACTION_LIST_CONFLICTS,
+ ACTION_RESOLVED_CONFLICT,
+ ACTION_DIFF,
+ ACTION_LOG,
+ ACTION_LIST_BRANCHES,
+ ACTION_LIST_BRANCHES_R,
+ ACTION_LS,
+ ACTION_TBRANCH,
+ ACTION_BRANCH,
+ ACTION_BRANCH_INTO,
+ ACTION_MKBRANCH,
+ ACTION_MERGE3,
+ ACTION_AUTO_MERGE,
+ ACTION_MV,
+ ACTION_MKDIR,
+ ACTION_PUT_FILE,
+ ACTION_CAT,
+ ACTION_CP,
+ ACTION_RM,
+ ACTION_CP_RM,
+ ACTION_BR_RM,
+ ACTION_BR_INTO_RM,
+ ACTION_COMMIT,
+ ACTION_UPDATE,
+ ACTION_SWITCH,
+ ACTION_STATUS,
+ ACTION_REVERT,
+ ACTION_MIGRATE
+} action_code_t;
+
+/* Static definition of one subcommand: its code, the command word the
+   user types, how many argument words it takes, and its help text. */
+typedef struct action_defn_t {
+ enum action_code_t code;
+ const char *name; /* the command word */
+ int num_args; /* number of argument words expected */
+ const char *args_help; /* one-line argument synopsis for help output */
+ const char *help; /* description shown in help output */
+} action_defn_t;
+
+/* NL: a newline plus indent, for wrapping help text onto a second line. */
+#define NL "\n "
+/* The table of all subcommands: one action_defn_t per command. */
+static const action_defn_t action_defn[] =
+{
+ {ACTION_INFO_WC, "info-wc", 0, "",
+ "print information about the WC"},
+ {ACTION_INFO, "info", 1, "PATH",
+ "show info about the element at PATH"},
+ {ACTION_LIST_CONFLICTS, "conflicts", 0, "",
+ "list unresolved conflicts"},
+ {ACTION_RESOLVED_CONFLICT,"resolved", 1, "CONFLICT_ID",
+ "mark conflict as resolved"},
+ {ACTION_LIST_BRANCHES, "branches", 1, "PATH",
+ "list all branches rooted at the same element as PATH"},
+ {ACTION_LIST_BRANCHES_R, "ls-br-r", 0, "",
+ "list all branches, recursively"},
+ {ACTION_LS, "ls", 1, "PATH",
+ "list elements in the branch found at PATH"},
+ {ACTION_LOG, "log", 2, "FROM@REV TO@REV",
+ "show per-revision diffs between FROM and TO"},
+ {ACTION_TBRANCH, "tbranch", 1, "SRC",
+ "branch the branch-root or branch-subtree at SRC" NL
+ "to make a new top-level branch"},
+ {ACTION_BRANCH, "branch", 2, "SRC DST",
+ "branch the branch-root or branch-subtree at SRC" NL
+ "to make a new branch at DST"},
+ {ACTION_BRANCH_INTO, "branch-into", 2, "SRC DST",
+ "make a branch of the existing subtree SRC appear at" NL
+ "DST as part of the existing branch that contains DST" NL
+ "(like merging the creation of SRC to DST)"},
+ {ACTION_MKBRANCH, "mkbranch", 1, "ROOT",
+ "make a directory that's the root of a new subbranch"},
+ {ACTION_DIFF, "diff", 2, "LEFT@REV RIGHT@REV",
+ "show differences from subtree LEFT to subtree RIGHT"},
+ {ACTION_MERGE3, "merge", 3, "FROM TO YCA@REV",
+ "3-way merge YCA->FROM into TO"},
+ {ACTION_AUTO_MERGE, "automerge", 2, "FROM TO",
+ "automatic merge FROM into TO"},
+ /* NOTE(review): args_help lists three words (REV SRC DST) but num_args
+    is 2 -- confirm the argument parser handles "cp" specially. */
+ {ACTION_CP, "cp", 2, "REV SRC DST",
+ "copy SRC@REV to DST"},
+ {ACTION_MV, "mv", 2, "SRC DST",
+ "move SRC to DST"},
+ {ACTION_RM, "rm", 1, "PATH",
+ "delete PATH"},
+ {ACTION_CP_RM, "copy-and-delete", 2, "SRC DST",
+ "copy-and-delete SRC to DST"},
+ {ACTION_BR_RM, "branch-and-delete", 2, "SRC DST",
+ "branch-and-delete SRC to DST"},
+ {ACTION_BR_INTO_RM, "branch-into-and-delete", 2, "SRC DST",
+ "merge-and-delete SRC to DST"},
+ {ACTION_MKDIR, "mkdir", 1, "PATH",
+ "create new directory PATH"},
+ {ACTION_PUT_FILE, "put", 2, "LOCAL_FILE PATH",
+ "add or modify file PATH with text copied from" NL
+ "LOCAL_FILE (use \"-\" to read from standard input)"},
+ {ACTION_CAT, "cat", 1, "PATH",
+ "display text (for a file) and props (if any) of PATH"},
+ {ACTION_COMMIT, "commit", 0, "",
+ "commit the changes"},
+ {ACTION_UPDATE, "update", 1, ".@REV",
+ "update to revision REV, keeping local changes"},
+ {ACTION_SWITCH, "switch", 1, "TARGET[@REV]",
+ "switch to another branch and/or revision, keeping local changes"},
+ {ACTION_STATUS, "status", 0, "",
+ "same as 'diff .@base .'"},
+ {ACTION_REVERT, "revert", 0, "",
+ "revert all uncommitted changes"},
+ {ACTION_MIGRATE, "migrate", 1, ".@REV",
+ "migrate changes from non-move-tracking revision"},
+};
+
+/* One parsed command invocation: the action code plus its (up to three)
+   parsed arguments. */
+typedef struct action_t {
+ /* The original command words (const char *) by which the action was
+ specified */
+ apr_array_header_t *action_args;
+
+ action_code_t action;
+
+ /* argument revisions */
+ svn_opt_revision_t rev_spec[3];
+
+ /* argument branch ids (may be null; parallel to relpath[]) */
+ const char *branch_id[3];
+
+ /* argument paths */
+ const char *relpath[3];
+} action_t;
+
+/* ====================================================================== */
+
+/* Find the deepest branch in the repository of which REVNUM:BRANCH_ID:RELPATH
+ * is either the root element or a normal, non-sub-branch element.
+ *
+ * RELPATH is a repository-relative path. REVNUM is a revision number, or
+ * SVN_INVALID_REVNUM meaning the current txn.
+ *
+ * Return the location of the element in that branch, or with
+ * EID=-1 if no element exists there.
+ *
+ * If BRANCH_ID is null, the default is the WC base branch when REVNUM is
+ * specified, and the WC working branch when REVNUM is SVN_INVALID_REVNUM.
+ *
+ * Return an error if branch BRANCH_ID does not exist in r<REVNUM>; otherwise,
+ * the result will never be NULL, as every path is within at least the root
+ * branch.
+ */
+static svn_error_t *
+find_el_rev_by_rrpath_rev(svn_branch__el_rev_id_t **el_rev_p,
+ svnmover_wc_t *wc,
+ const svn_opt_revision_t *rev_spec,
+ const char *branch_id,
+ const char *relpath,
+ apr_pool_t *result_pool,
+ apr_pool_t *scratch_pool)
+{
+ /* A numeric revision or 'head': look up in committed history, via the
+    repository object.  BRANCH_ID defaults to the WC base branch. */
+ if (rev_spec->kind == svn_opt_revision_number
+ || rev_spec->kind == svn_opt_revision_head)
+ {
+ svn_revnum_t revnum
+ = (rev_spec->kind == svn_opt_revision_number)
+ ? rev_spec->value.number : wc->head_revision;
+ const svn_branch__repos_t *repos = wc->working->branch->txn->repos;
+
+ if (! branch_id)
+ branch_id = wc->base->branch->bid;
+ SVN_ERR(svn_branch__repos_find_el_rev_by_path_rev(el_rev_p, repos,
+ revnum,
+ branch_id,
+ relpath,
+ result_pool,
+ scratch_pool));
+ }
+ /* Unspecified/working/base/committed: look up in the WC working state.
+    BRANCH_ID defaults to the WC working branch. */
+ else if (rev_spec->kind == svn_opt_revision_unspecified
+ || rev_spec->kind == svn_opt_revision_working
+ || rev_spec->kind == svn_opt_revision_base
+ || rev_spec->kind == svn_opt_revision_committed)
+ {
+ svn_branch__state_t *branch
+ = branch_id ? svn_branch__txn_get_branch_by_id(
+ wc->working->branch->txn, branch_id, scratch_pool)
+ : wc->working->branch;
+ svn_branch__el_rev_id_t *el_rev = apr_palloc(result_pool, sizeof(*el_rev));
+
+ if (! branch)
+ return svn_error_createf(SVN_BRANCH__ERR, NULL,
+ _("Branch %s not found in working state"),
+ branch_id);
+ SVN_ERR(svn_branch__find_nested_branch_element_by_relpath(
+ &el_rev->branch, &el_rev->eid,
+ branch, relpath, scratch_pool));
+ /* 'base'/'committed' resolve to the element's stored base revision;
+    'working'/unspecified mean the uncommitted state. */
+ if (rev_spec->kind == svn_opt_revision_unspecified
+ || rev_spec->kind == svn_opt_revision_working)
+ {
+ el_rev->rev = SVN_INVALID_REVNUM;
+ }
+ else
+ {
+ el_rev->rev = svnmover_wc_get_base_rev(wc, el_rev->branch,
+ el_rev->eid, scratch_pool);
+ }
+ *el_rev_p = el_rev;
+ }
+ else
+ {
+ return svn_error_createf(SVN_ERR_INCORRECT_PARAMS, NULL,
+ "'%s@...': revision specifier "
+ "must be a number or 'head', 'base' "
+ "or 'committed'",
+ relpath);
+ }
+ SVN_ERR_ASSERT(*el_rev_p);
+ return SVN_NO_ERROR;
+}
+
+/* Return a string suitable for appending to a displayed element name or
+ * element id to indicate that it is a subbranch root element for SUBBRANCH.
+ * Return "" if SUBBRANCH is null.
+ */
+static const char *
+branch_str(svn_branch__state_t *subbranch,
+           apr_pool_t *result_pool)
+{
+  /* Nothing to annotate when there is no subbranch. */
+  if (! subbranch)
+    return "";
+
+  return apr_psprintf(result_pool,
+                      " (branch %s)",
+                      svn_branch__get_id(subbranch, result_pool));
+}
+
+/* Return a string suitable for appending to a displayed element name or
+ * element id to indicate that BRANCH:EID is a subbranch root element.
+ * Return "" if the element is not a subbranch root element.
+ */
+static const char *
+subbranch_str(svn_branch__state_t *branch,
+ int eid,
+ apr_pool_t *result_pool)
+{
+ svn_branch__state_t *subbranch;
+
+ /* Deliberately discard any lookup error: if the subbranch cannot be
+    fetched we simply annotate nothing (branch_str handles NULL). */
+ svn_error_clear(svn_branch__get_subbranch_at_eid(branch, &subbranch,
+ eid, result_pool));
+ return branch_str(subbranch, result_pool);
+}
+
+/* */
+/* Like subbranch_str() but for a subtree snapshot: return " (branch ...)"
+ * if SUBTREE has a subbranch rooted at EID within branch BID, else "". */
+static const char *
+subtree_subbranch_str(svn_branch__subtree_t *subtree,
+ const char *bid,
+ int eid,
+ apr_pool_t *result_pool)
+{
+ svn_branch__subtree_t *subbranch
+ = svn_branch__subtree_get_subbranch_at_eid(subtree, eid, result_pool);
+
+ if (subbranch)
+ return apr_psprintf(result_pool,
+ " (branch %s)",
+ svn_branch__id_nest(bid, eid, result_pool));
+ return "";
+}
+
+/* */
+/* Return the repository-relative path of the element EL_REV. */
+static const char *
+el_rev_id_to_path(svn_branch__el_rev_id_t *el_rev,
+                  apr_pool_t *result_pool)
+{
+  return svn_branch__get_rrpath_by_eid(el_rev->branch, el_rev->eid,
+                                       result_pool);
+}
+
+/* */
+/* Return the repository-relative path of an element named TO_NAME whose
+ * parent is TO_PARENT_EID in TO_BRANCH. */
+static const char *
+branch_peid_name_to_path(svn_branch__state_t *to_branch,
+                         int to_parent_eid,
+                         const char *to_name,
+                         apr_pool_t *result_pool)
+{
+  const char *parent_path
+    = svn_branch__get_rrpath_by_eid(to_branch, to_parent_eid, result_pool);
+
+  return svn_relpath_join(parent_path, to_name, result_pool);
+}
+
+/* */
+/* Sort callback: order (eid -> path) hash items by their path values. */
+static int
+sort_compare_eid_mappings_by_path(const svn_sort__item_t *a,
+                                  const svn_sort__item_t *b)
+{
+  const char *path_a = a->value;
+  const char *path_b = b->value;
+
+  return svn_path_compare_paths(path_a, path_b);
+}
+
+/* List the elements in BRANCH, in path notation.
+ *
+ * List only the elements for which a relpath is known -- that is, elements
+ * whose parents exist all the way up to the branch root.
+ */
+static svn_error_t *
+list_branch_elements(svn_branch__state_t *branch,
+ apr_pool_t *scratch_pool)
+{
+ apr_hash_t *eid_to_path = apr_hash_make(scratch_pool);
+ svn_element__tree_t *elements;
+ apr_hash_index_t *hi;
+ svn_eid__hash_iter_t *ei;
+
+ /* First pass: map each eid to its relpath (may be NULL for elements
+    with no complete parent chain; those are then skipped by sorting
+    on path?  NOTE(review): confirm how NULL paths are handled by
+    svn_eid__hash_set/the sort). */
+ SVN_ERR(svn_branch__state_get_elements(branch, &elements, scratch_pool));
+ for (hi = apr_hash_first(scratch_pool, elements->e_map);
+ hi; hi = apr_hash_next(hi))
+ {
+ int eid = svn_eid__hash_this_key(hi);
+ const char *relpath = svn_branch__get_path_by_eid(branch, eid,
+ scratch_pool);
+
+ svn_eid__hash_set(eid_to_path, eid, relpath);
+ }
+ /* Second pass: print them in path order, showing "." for the root. */
+ for (SVN_EID__HASH_ITER_SORTED(ei, eid_to_path,
+ sort_compare_eid_mappings_by_path,
+ scratch_pool))
+ {
+ int eid = ei->eid;
+ const char *relpath = ei->val;
+
+ svnmover_notify(" %-20s%s",
+ relpath[0] ? relpath : ".",
+ subbranch_str(branch, eid, scratch_pool));
+ }
+
+ return SVN_NO_ERROR;
+}
+
+/* */
+/* Sort callback: order hash items numerically by their (int) eid keys. */
+static int
+sort_compare_items_by_eid(const svn_sort__item_t *a,
+                          const svn_sort__item_t *b)
+{
+  return *(const int *)a->key - *(const int *)b->key;
+}
+
+/* Format ELEMENT's parent-eid and name for display; a root element (one
+ * with no parent) is shown as ".". */
+static const char *
+peid_name(const svn_element__content_t *element,
+          apr_pool_t *scratch_pool)
+{
+  if (element->parent_eid != -1)
+    return apr_psprintf(scratch_pool, "%3d/%-10s",
+                        element->parent_eid, element->name);
+
+  return apr_psprintf(scratch_pool, "%3s %-10s", "", ".");
+}
+
+/* Column headings printed by list_branch_elements_by_eid(). */
+static const char elements_by_eid_header[]
+ = " eid parent-eid/name\n"
+ " --- ----------/----";
+
+/* List all elements in branch BRANCH, in element notation.
+ */
+static svn_error_t *
+list_branch_elements_by_eid(svn_branch__state_t *branch,
+ apr_pool_t *scratch_pool)
+{
+ svn_element__tree_t *elements;
+ svn_eid__hash_iter_t *ei;
+
+ /* Header first (verbose mode only), then one line per element in
+    ascending eid order; null map entries are skipped. */
+ svnmover_notify_v("%s", elements_by_eid_header);
+ SVN_ERR(svn_branch__state_get_elements(branch, &elements, scratch_pool));
+ for (SVN_EID__HASH_ITER_SORTED_BY_EID(ei, elements->e_map, scratch_pool))
+ {
+ int eid = ei->eid;
+ svn_element__content_t *element = ei->val;
+
+ if (element)
+ {
+ svnmover_notify(" e%-3d %21s%s",
+ eid,
+ peid_name(element, scratch_pool),
+ subbranch_str(branch, eid, scratch_pool));
+ }
+ }
+
+ return SVN_NO_ERROR;
+}
+
+/* */
+/* Return the branch-listing column headings appropriate to the current
+ * UI mode, each line prefixed with PREFIX. */
+static const char *
+branch_id_header_str(const char *prefix,
+                     apr_pool_t *result_pool)
+{
+  /* Both formats consume PREFIX twice (once per heading line). */
+  const char *format
+    = (the_ui_mode == UI_MODE_PATHS)
+        ? "%sbranch-id root-path\n"
+          "%s--------- ---------"
+        : "%sbranch-id branch-name root-eid\n"
+          "%s--------- ----------- --------";
+
+  return apr_psprintf(result_pool, format, prefix, prefix);
+}
+
+/* Show the id and path or root-eid of BRANCH.
+ */
+static const char *
+branch_id_str(svn_branch__state_t *branch,
+ apr_pool_t *result_pool)
+{
+ /* Scratch allocations are simply made in the result pool here. */
+ apr_pool_t *scratch_pool = result_pool;
+
+ if (the_ui_mode == UI_MODE_PATHS)
+ {
+ return apr_psprintf(result_pool, "%-10s /%s",
+ svn_branch__get_id(branch, scratch_pool),
+ svn_branch__get_root_rrpath(branch, scratch_pool));
+ }
+ else
+ {
+ svn_element__content_t *outer_el = NULL;
+ svn_branch__state_t *outer_branch;
+ int outer_eid;
+
+ /* In eid mode, show the branch's name within its outer branch (or
+    "/" for a top-level branch) plus its root eid.  Lookup errors are
+    discarded and treated as "no outer element". */
+ svn_branch__get_outer_branch_and_eid(&outer_branch, &outer_eid,
+ branch, scratch_pool);
+
+ if (outer_branch)
+ svn_error_clear(svn_branch__state_get_element(outer_branch, &outer_el,
+ outer_eid, scratch_pool));
+
+ return apr_psprintf(result_pool, "%-10s %-12s root=e%d",
+ svn_branch__get_id(branch, scratch_pool),
+ outer_el ? outer_el->name : "/",
+ svn_branch__root_eid(branch));
+ }
+}
+
+/* List the branch BRANCH.
+ *
+ * If WITH_ELEMENTS is true, also list the elements in it.
+ */
+/* Print a one-line summary of BRANCH; when WITH_ELEMENTS, also list its
+ * elements in the notation selected by the current UI mode. */
+static svn_error_t *
+list_branch(svn_branch__state_t *branch,
+            svn_boolean_t with_elements,
+            apr_pool_t *scratch_pool)
+{
+  svnmover_notify(" %s", branch_id_str(branch, scratch_pool));
+
+  if (! with_elements)
+    return SVN_NO_ERROR;
+
+  if (the_ui_mode == UI_MODE_PATHS)
+    SVN_ERR(list_branch_elements(branch, scratch_pool));
+  else
+    SVN_ERR(list_branch_elements_by_eid(branch, scratch_pool));
+
+  return SVN_NO_ERROR;
+}
+
+/* List all branches rooted at EID.
+ *
+ * If WITH_ELEMENTS is true, also list the elements in each branch.
+ */
+static svn_error_t *
+list_branches(svn_branch__txn_t *txn,
+ int eid,
+ svn_boolean_t with_elements,
+ apr_pool_t *scratch_pool)
+{
+ const apr_array_header_t *branches;
+ int i;
+ svn_boolean_t printed_header = FALSE;
+
+ svnmover_notify_v("%s", branch_id_header_str(" ", scratch_pool));
+
+ branches = svn_branch__txn_get_branches(txn, scratch_pool);
+
+ /* First pass: branches whose root element is EID. */
+ for (i = 0; i < branches->nelts; i++)
+ {
+ svn_branch__state_t *branch = APR_ARRAY_IDX(branches, i, void *);
+
+ if (svn_branch__root_eid(branch) != eid)
+ continue;
+
+ SVN_ERR(list_branch(branch, with_elements, scratch_pool));
+ if (with_elements) /* separate branches by a blank line */
+ svnmover_notify("%s", "");
+ }
+
+ /* Second pass: branches that contain element EID but are not rooted
+    at it; print an explanatory sub-header before the first one. */
+ for (i = 0; i < branches->nelts; i++)
+ {
+ svn_branch__state_t *branch = APR_ARRAY_IDX(branches, i, void *);
+ svn_element__content_t *element;
+
+ SVN_ERR(svn_branch__state_get_element(branch, &element,
+ eid, scratch_pool));
+ if (! element
+ || svn_branch__root_eid(branch) == eid)
+ continue;
+
+ if (! printed_header)
+ {
+ if (the_ui_mode == UI_MODE_PATHS)
+ svnmover_notify_v("branches containing but not rooted at that element:");
+ else
+ svnmover_notify_v("branches containing but not rooted at e%d:", eid);
+ printed_header = TRUE;
+ }
+ SVN_ERR(list_branch(branch, with_elements, scratch_pool));
+ if (with_elements) /* separate branches by a blank line */
+ svnmover_notify("%s", "");
+ }
+
+ return SVN_NO_ERROR;
+}
+
+/* List all branches. If WITH_ELEMENTS is true, also list the elements
+ * in each branch.
+ */
+/* List every branch in TXN; when WITH_ELEMENTS, list each branch's
+ * elements too, separating branches with blank lines. */
+static svn_error_t *
+list_all_branches(svn_branch__txn_t *txn,
+                  svn_boolean_t with_elements,
+                  apr_pool_t *scratch_pool)
+{
+  const apr_array_header_t *all_branches
+    = svn_branch__txn_get_branches(txn, scratch_pool);
+  int idx;
+
+  svnmover_notify_v("branches:");
+
+  for (idx = 0; idx < all_branches->nelts; idx++)
+    {
+      svn_branch__state_t *b = APR_ARRAY_IDX(all_branches, idx, void *);
+
+      SVN_ERR(list_branch(b, with_elements, scratch_pool));
+      if (with_elements) /* separate branches by a blank line */
+        svnmover_notify("%s", "");
+    }
+
+  return SVN_NO_ERROR;
+}
+
+/* Switch the WC to revision REVISION (SVN_INVALID_REVNUM means HEAD)
+ * and branch TARGET_BRANCH.
+ *
+ * Merge any changes in the existing txn into the new txn.
+ */
+static svn_error_t *
+do_switch(svnmover_wc_t *wc,
+ svn_revnum_t revision,
+ svn_branch__state_t *target_branch,
+ apr_pool_t *scratch_pool)
+{
+ const char *target_branch_id
+ = svn_branch__get_id(target_branch, scratch_pool);
+ /* Keep hold of the previous WC txn */
+ svn_branch__state_t *previous_base_br = wc->base->branch;
+ svn_branch__state_t *previous_working_br = wc->working->branch;
+ svn_boolean_t has_local_changes;
+
+ SVN_ERR(txn_is_changed(previous_working_br->txn,
+ &has_local_changes, scratch_pool));
+
+ /* Usually one would switch the WC to another branch (or just another
+ revision) rooted at the same element. Switching to a branch rooted
+ at a different element is well defined, but give a warning. */
+ if (has_local_changes
+ && svn_branch__root_eid(target_branch)
+ != svn_branch__root_eid(previous_base_br))
+ {
+ svnmover_notify(_("Warning: you are switching from %s rooted at e%d "
+ "to %s rooted at e%d, a different root element, "
+ "while there are local changes. "),
+ svn_branch__get_id(previous_base_br, scratch_pool),
+ svn_branch__root_eid(previous_base_br),
+ target_branch_id,
+ svn_branch__root_eid(target_branch));
+ }
+
+ /* Complete the old edit drive into the 'WC' txn */
+ SVN_ERR(svn_branch__txn_sequence_point(wc->edit_txn, scratch_pool));
+
+ /* Check out a new WC, re-using the same data object */
+ SVN_ERR(wc_checkout(wc, revision, target_branch_id, scratch_pool));
+
+ if (has_local_changes)
+ {
+ svn_branch__el_rev_id_t *yca, *src, *tgt;
+
+ /* Merge changes from the old into the new WC.  The old base is the
+    YCA, the old working state the source, and the freshly checked-out
+    working state the target. */
+ yca = svn_branch__el_rev_id_create(previous_base_br,
+ svn_branch__root_eid(previous_base_br),
+ previous_base_br->txn->rev,
+ scratch_pool);
+ src = svn_branch__el_rev_id_create(previous_working_br,
+ svn_branch__root_eid(previous_working_br),
+ SVN_INVALID_REVNUM, scratch_pool);
+ tgt = svn_branch__el_rev_id_create(wc->working->branch,
+ svn_branch__root_eid(wc->working->branch),
+ SVN_INVALID_REVNUM, scratch_pool);
+ SVN_ERR(svnmover_branch_merge(wc->edit_txn, tgt->branch,
+ &wc->conflicts,
+ src, tgt, yca, wc->pool, scratch_pool));
+
+ if (svnmover_any_conflicts(wc->conflicts))
+ {
+ SVN_ERR(svnmover_display_conflicts(wc->conflicts, scratch_pool));
+ }
+
+ /* ### TODO: If the merge raises conflicts, allow the user to revert
+ to the pre-update state or resolve the conflicts. Currently
+ this leaves the merge partially done and the pre-update state
+ is lost. */
+ }
+
+ return SVN_NO_ERROR;
+}
+
+/*
+ */
+/* Perform a three-way merge of YCA->SRC into TGT, then record SRC as a
+ * new merge parent in TGT's history and display any conflicts raised. */
+static svn_error_t *
+do_merge(svnmover_wc_t *wc,
+ svn_branch__el_rev_id_t *src,
+ svn_branch__el_rev_id_t *tgt,
+ svn_branch__el_rev_id_t *yca,
+ apr_pool_t *scratch_pool)
+{
+ svn_branch__history_t *history;
+
+ /* Warn (but proceed) if the three root elements are not the same. */
+ if (src->eid != tgt->eid || src->eid != yca->eid)
+ {
+ svnmover_notify(_("Warning: root elements differ in the requested merge "
+ "(from: e%d, to: e%d, yca: e%d)"),
+ src->eid, tgt->eid, yca->eid);
+ }
+
+ SVN_ERR(svnmover_branch_merge(wc->edit_txn, tgt->branch,
+ &wc->conflicts,
+ src, tgt, yca,
+ wc->pool, scratch_pool));
+
+ /* Update the history */
+ SVN_ERR(svn_branch__state_get_history(tgt->branch, &history, scratch_pool));
+ /* ### Assume this was a complete merge -- i.e. all changes up to YCA were
+ previously merged, so now SRC is a new parent. */
+ SVN_ERR(svn_branch__history_add_parent(history, src->rev, src->branch->bid,
+ scratch_pool));
+ SVN_ERR(svn_branch__state_set_history(tgt->branch, history, scratch_pool));
+ svnmover_notify_v(_("--- recorded merge parent as: %ld.%s"),
+ src->rev, src->branch->bid);
+
+ if (svnmover_any_conflicts(wc->conflicts))
+ {
+ SVN_ERR(svnmover_display_conflicts(wc->conflicts, scratch_pool));
+ }
+
+ return SVN_NO_ERROR;
+}
+
+/*
+ */
+/* Automatic merge of SRC into TGT: find the youngest common ancestor and
+ * delegate to do_merge().
+ *
+ * NOTE: YCA discovery is not yet implemented (YCA is hard-coded to null
+ * below), so as written this function always returns the "no YCA found"
+ * error; the yca-found path is dead code until the TODO is resolved. */
+static svn_error_t *
+do_auto_merge(svnmover_wc_t *wc,
+ svn_branch__el_rev_id_t *src,
+ svn_branch__el_rev_id_t *tgt,
+ apr_pool_t *scratch_pool)
+{
+ svn_branch__rev_bid_t *yca;
+
+ /* Find the Youngest Common Ancestor.
+ ### TODO */
+ yca = NULL;
+
+ if (yca)
+ {
+ svn_branch__repos_t *repos = wc->working->branch->txn->repos;
+ svn_branch__state_t *yca_branch;
+ svn_branch__el_rev_id_t *_yca;
+
+ SVN_ERR(svn_branch__repos_get_branch_by_id(&yca_branch, repos,
+ yca->rev, yca->bid,
+ scratch_pool));
+ _yca = svn_branch__el_rev_id_create(yca_branch,
+ svn_branch__root_eid(yca_branch),
+ yca->rev, scratch_pool);
+
+ SVN_ERR(do_merge(wc, src, tgt, _yca, scratch_pool));
+ }
+ else
+ {
+ return svn_error_create(SVN_BRANCH__ERR, NULL,
+ _("Cannot perform automatic merge: "
+ "no YCA found"));
+ }
+
+ return SVN_NO_ERROR;
+}
+
+/* Show the history metadata of BRANCH, and of each of its subbranches,
+ * recursively, prefixing each output line with PREFIX.
+ *
+ * If BRANCH is null, do nothing.
+ */
+static svn_error_t *
+show_history_r(svn_branch__state_t *branch,
+ const char *prefix,
+ apr_pool_t *scratch_pool)
+{
+ svn_branch__history_t *history = NULL;
+ svn_branch__subtree_t *subtree = NULL;
+ apr_hash_index_t *hi;
+
+ /* A null branch prints nothing. */
+ if (! branch)
+ return SVN_NO_ERROR;
+
+ /* Print this branch's own history line. */
+ SVN_ERR(svn_branch__state_get_history(branch, &history, scratch_pool));
+ svnmover_notify("%s%s: %s", prefix,
+ branch->bid, history_str(history, scratch_pool));
+
+ /* recurse into each subbranch */
+ SVN_ERR(svn_branch__get_subtree(branch, &subtree,
+ svn_branch__root_eid(branch),
+ scratch_pool));
+ for (hi = apr_hash_first(scratch_pool, subtree->subbranches);
+ hi; hi = apr_hash_next(hi))
+ {
+ int e = svn_eid__hash_this_key(hi);
+ svn_branch__state_t *subbranch = NULL;
+
+ SVN_ERR(svn_branch__get_subbranch_at_eid(branch, &subbranch, e,
+ scratch_pool));
+ if (subbranch)
+ {
+ SVN_ERR(show_history_r(subbranch, prefix, scratch_pool));
+ }
+ }
+ return SVN_NO_ERROR;
+}
+
+/* */
+/* One row of a subtree diff: the element's content on the left (E0) and
+   right (E1) side of the diff, and its relpath on each side where the
+   element exists there (else null). */
+typedef struct diff_item_t
+{
+ int eid;
+ svn_element__content_t *e0, *e1; /* null if absent on that side */
+ const char *relpath0, *relpath1;
+ /* NOTE(review): subtree_diff() as written never assigns 'modified' --
+    confirm every producer initializes it before anyone reads it. */
+ svn_boolean_t modified, reparented, renamed;
+} diff_item_t;
+
+/* Return differences between branch subtrees S_LEFT and S_RIGHT.
+ * Diff the union of S_LEFT's and S_RIGHT's elements.
+ *
+ * Set *DIFF_CHANGES to a hash of (eid -> diff_item_t).
+ *
+ * ### This requires 'subtrees' only in order to produce the 'relpath'
+ * fields in the output. Other than that, it would work with arbitrary
+ * sets of elements.
+ */
+static svn_error_t *
+subtree_diff(apr_hash_t **diff_changes,
+             svn_branch__subtree_t *s_left,
+             svn_branch__subtree_t *s_right,
+             apr_pool_t *result_pool,
+             apr_pool_t *scratch_pool)
+{
+  apr_hash_t *diff_left_right;
+  apr_hash_index_t *hi;
+
+  *diff_changes = apr_hash_make(result_pool);
+
+  /* Get, per eid, the (left, right) content pairs that differ. */
+  SVN_ERR(svnmover_element_differences(&diff_left_right,
+                                       s_left->tree, s_right->tree,
+                                       NULL /*union of s_left & s_right*/,
+                                       result_pool, scratch_pool));
+
+  for (hi = apr_hash_first(scratch_pool, diff_left_right);
+       hi; hi = apr_hash_next(hi))
+    {
+      int eid = svn_eid__hash_this_key(hi);
+      svn_element__content_t **e_pair = apr_hash_this_val(hi);
+      svn_element__content_t *e0 = e_pair[0], *e1 = e_pair[1];
+
+      if (e0 || e1)
+        {
+          diff_item_t *item = apr_palloc(result_pool, sizeof(*item));
+
+          item->eid = eid;
+          item->e0 = e0;
+          item->e1 = e1;
+          /* Relpaths are only known on the side(s) where the element
+             exists. */
+          item->relpath0 = e0 ? svn_element__tree_get_path_by_eid(
+                                  s_left->tree, eid, result_pool) : NULL;
+          item->relpath1 = e1 ? svn_element__tree_get_path_by_eid(
+                                  s_right->tree, eid, result_pool) : NULL;
+          /* Fix: 'modified' was previously left uninitialized.  An
+             element listed here that exists on both sides differs, so
+             it is a modification. */
+          item->modified = (e0 && e1);
+          item->reparented = (e0 && e1 && e0->parent_eid != e1->parent_eid);
+          item->renamed = (e0 && e1 && strcmp(e0->name, e1->name) != 0);
+
+          svn_eid__hash_set(*diff_changes, eid, item);
+        }
+    }
+
+  return SVN_NO_ERROR;
+}
+
+/* Find the relative order of diff items A and B, according to the
+ * "major path" of each. The major path means its right-hand relpath, if
+ * it exists on the right-hand side of the diff, else its left-hand relpath.
+ *
+ * Return negative/zero/positive when A sorts before/equal-to/after B.
+ */
+/* Sort callback: order diff items so that all deletions come first, then
+ * by each item's "major" path -- its right-hand relpath when the element
+ * exists on the right, else its left-hand relpath. */
+static int
+diff_ordering_major_paths(const struct svn_sort__item_t *a,
+                          const struct svn_sort__item_t *b)
+{
+  const diff_item_t *ia = a->value;
+  const diff_item_t *ib = b->value;
+  int a_is_deletion = (ia->e0 && ! ia->e1);
+  int b_is_deletion = (ib->e0 && ! ib->e1);
+
+  if (a_is_deletion != b_is_deletion)
+    return b_is_deletion - a_is_deletion;
+
+  return svn_path_compare_paths(ia->e1 ? ia->relpath1 : ia->relpath0,
+                                ib->e1 ? ib->relpath1 : ib->relpath0);
+}
+
+/* Display differences between subtrees LEFT and RIGHT, which are subtrees
+ * of branches LEFT_BID and RIGHT_BID respectively.
+ *
+ * Diff the union of LEFT's and RIGHT's elements.
+ *
+ * (Differences are computed from the given subtrees; no editor is used.)
+ *
+ * Write a line containing HEADER before any other output, if it is not
+ * null. Write PREFIX at the start of each line of output, including any
+ * header line. PREFIX and HEADER should contain no end-of-line characters.
+ *
+ * The output refers to paths or to elements according to THE_UI_MODE.
+ */
+static svn_error_t *
+show_subtree_diff(svn_branch__subtree_t *left,
+ const char *left_bid,
+ svn_branch__subtree_t *right,
+ const char *right_bid,
+ const char *prefix,
+ const char *header,
+ apr_pool_t *scratch_pool)
+{
+ apr_hash_t *diff_changes;
+ svn_eid__hash_iter_t *ei;
+
+ SVN_ERR_ASSERT(left && left->tree->root_eid != -1
+ && right && right->tree->root_eid != -1);
+
+ SVN_ERR(subtree_diff(&diff_changes, left, right,
+ scratch_pool, scratch_pool));
+
+ /* The header is only printed when there is at least one change. */
+ if (header && apr_hash_count(diff_changes))
+ svnmover_notify("%s%s", prefix, header);
+
+ /* Sort by eid in eid mode, otherwise by "major path". */
+ for (SVN_EID__HASH_ITER_SORTED(ei, diff_changes,
+ (the_ui_mode == UI_MODE_EIDS)
+ ? sort_compare_items_by_eid
+ : diff_ordering_major_paths,
+ scratch_pool))
+ {
+ diff_item_t *item = ei->val;
+ svn_element__content_t *e0 = item->e0, *e1 = item->e1;
+ char status_mod = (e0 && e1) ? 'M' : e0 ? 'D' : 'A';
+
+ /* For a deleted element whose parent was also deleted, mark it is
+ less interesting, somehow. (Or we could omit it entirely.) */
+ if (status_mod == 'D')
+ {
+ diff_item_t *parent_item
+ = svn_eid__hash_get(diff_changes, e0->parent_eid);
+
+ if (parent_item && ! parent_item->e1)
+ status_mod = 'd';
+ }
+
+ if (the_ui_mode == UI_MODE_PATHS)
+ {
+ const char *major_path = (e1 ? item->relpath1 : item->relpath0);
+ const char *from = "";
+
+ /* Describe a move and/or rename as a "from" annotation. */
+ if (item->reparented || item->renamed)
+ {
+ if (! item->reparented)
+ from = apr_psprintf(scratch_pool,
+ " (renamed from .../%s)",
+ e0->name);
+ else if (! item->renamed)
+ from = apr_psprintf(scratch_pool,
+ " (moved from %s/...)",
+ svn_relpath_dirname(item->relpath0,
+ scratch_pool));
+ else
+ from = apr_psprintf(scratch_pool,
+ " (moved+renamed from %s)",
+ item->relpath0);
+ }
+ svnmover_notify("%s%c%c%c %s%s%s",
+ prefix,
+ status_mod,
+ item->reparented ? 'v' : ' ',
+ item->renamed ? 'r' : ' ',
+ major_path,
+ subtree_subbranch_str(e0 ? left : right,
+ e0 ? left_bid : right_bid,
+ item->eid, scratch_pool),
+ from);
+ }
+ else
+ {
+ svnmover_notify("%s%c%c%c e%-3d %s%s%s%s%s",
+ prefix,
+ status_mod,
+ item->reparented ? 'v' : ' ',
+ item->renamed ? 'r' : ' ',
+ item->eid,
+ e1 ? peid_name(e1, scratch_pool) : "",
+ subtree_subbranch_str(e0 ? left : right,
+ e0 ? left_bid : right_bid,
+ item->eid, scratch_pool),
+ e0 && e1 ? " (from " : "",
+ e0 ? peid_name(e0, scratch_pool) : "",
+ e0 && e1 ? ")" : "");
+ }
+ }
+
+ return SVN_NO_ERROR;
+}
+
+/* Function type for displaying the differences between two subtrees;
+   see show_subtree_diff() for the parameter semantics. */
+typedef svn_error_t *
+svn_branch__diff_func_t(svn_branch__subtree_t *left,
+ const char *left_bid,
+ svn_branch__subtree_t *right,
+ const char *right_bid,
+ const char *prefix,
+ const char *header,
+ apr_pool_t *scratch_pool);
+
+/* Display differences between the subtree of LEFT_BRANCH rooted at
+ * LEFT_ROOT_EID and the subtree of RIGHT_BRANCH rooted at RIGHT_ROOT_EID.
+ *
+ * Either branch may be null, in which case the other side is announced
+ * as an added or deleted branch and DIFF_FUNC is not called for it.
+ * Otherwise DIFF_FUNC reports the element-level differences, preceded by
+ * a header naming both sides (or one side if they are identical strings).
+ *
+ * Recurse into sub-branches that exist on either or both sides.
+ * Each output line is prefixed with PREFIX.
+ */
+static svn_error_t *
+subtree_diff_r(svn_branch__state_t *left_branch,
+               int left_root_eid,
+               svn_branch__state_t *right_branch,
+               int right_root_eid,
+               svn_branch__diff_func_t diff_func,
+               const char *prefix,
+               apr_pool_t *scratch_pool)
+{
+  svn_branch__subtree_t *left = NULL;
+  svn_branch__subtree_t *right = NULL;
+  /* Human-readable "BID:eEID at /PATH" description of each side, or NULL
+     if that side's branch is absent. */
+  const char *left_str
+    = left_branch
+        ? apr_psprintf(scratch_pool, "%s:e%d at /%s",
+                       left_branch->bid, left_root_eid,
+                       svn_branch__get_root_rrpath(left_branch, scratch_pool))
+        : NULL;
+  const char *right_str
+    = right_branch
+        ? apr_psprintf(scratch_pool, "%s:e%d at /%s",
+                       right_branch->bid, right_root_eid,
+                       svn_branch__get_root_rrpath(right_branch, scratch_pool))
+        : NULL;
+  const char *header;
+  apr_hash_t *subbranches_l, *subbranches_r, *subbranches_all;
+  apr_hash_index_t *hi;
+
+  if (left_branch)
+    {
+      SVN_ERR(svn_branch__get_subtree(left_branch, &left, left_root_eid,
+                                      scratch_pool));
+    }
+  if (right_branch)
+    {
+      SVN_ERR(svn_branch__get_subtree(right_branch, &right, right_root_eid,
+                                      scratch_pool));
+    }
+
+  if (!left)
+    {
+      header = apr_psprintf(scratch_pool,
+                            "--- added branch %s",
+                            right_str);
+      svnmover_notify("%s%s", prefix, header);
+    }
+  else if (!right)
+    {
+      header = apr_psprintf(scratch_pool,
+                            "--- deleted branch %s",
+                            left_str);
+      svnmover_notify("%s%s", prefix, header);
+    }
+  else
+    {
+      /* Both sides exist: build the header but let DIFF_FUNC decide
+         whether/where to print it. */
+      if (strcmp(left_str, right_str) == 0)
+        {
+          header = apr_psprintf(
+                     scratch_pool, "--- diff branch %s",
+                     left_str);
+        }
+      else
+        {
+          header = apr_psprintf(
+                     scratch_pool, "--- diff branch %s : %s",
+                     left_str, right_str);
+        }
+      SVN_ERR(diff_func(left, left_branch->bid, right, right_branch->bid,
+                        prefix, header,
+                        scratch_pool));
+    }
+
+  /* recurse into each subbranch that exists in LEFT and/or in RIGHT */
+  subbranches_l = left ? left->subbranches : apr_hash_make(scratch_pool);
+  subbranches_r = right ? right->subbranches : apr_hash_make(scratch_pool);
+  subbranches_all = hash_overlay(subbranches_l, subbranches_r);
+
+  for (hi = apr_hash_first(scratch_pool, subbranches_all);
+       hi; hi = apr_hash_next(hi))
+    {
+      int e = svn_eid__hash_this_key(hi);
+      svn_branch__state_t *left_subbranch = NULL, *right_subbranch = NULL;
+      int left_subbranch_eid = -1, right_subbranch_eid = -1;
+
+      /* recurse */
+      if (left_branch)
+        {
+          SVN_ERR(svn_branch__get_subbranch_at_eid(left_branch,
+                                                   &left_subbranch, e,
+                                                   scratch_pool));
+          if (left_subbranch)
+            {
+              left_subbranch_eid = svn_branch__root_eid(left_subbranch);
+            }
+        }
+      if (right_branch)
+        {
+          SVN_ERR(svn_branch__get_subbranch_at_eid(right_branch,
+                                                   &right_subbranch, e,
+                                                   scratch_pool));
+          if (right_subbranch)
+            {
+              right_subbranch_eid = svn_branch__root_eid(right_subbranch);
+            }
+        }
+      SVN_ERR(subtree_diff_r(left_subbranch, left_subbranch_eid,
+                             right_subbranch, right_subbranch_eid,
+                             diff_func, prefix, scratch_pool));
+    }
+  return SVN_NO_ERROR;
+}
+
+/* Display differences between the branch subtrees designated by LEFT
+ * and RIGHT, using DIFF_FUNC to report element-level differences and
+ * prefixing each output line with PREFIX.
+ *
+ * Recurse into sub-branches.
+ */
+static svn_error_t *
+branch_diff_r(svn_branch__el_rev_id_t *left,
+              svn_branch__el_rev_id_t *right,
+              svn_branch__diff_func_t diff_func,
+              const char *prefix,
+              apr_pool_t *scratch_pool)
+{
+  SVN_ERR(subtree_diff_r(left->branch, left->eid,
+                         right->branch, right->eid,
+                         diff_func, prefix, scratch_pool));
+  return SVN_NO_ERROR;
+}
+
+/* Copy the subtree at FROM_EL_REV into TO_BRANCH as a child of
+ * TO_PARENT_EID named NEW_NAME, and notify ("A+") in verbose mode.
+ */
+static svn_error_t *
+do_copy(svn_branch__el_rev_id_t *from_el_rev,
+        svn_branch__state_t *to_branch,
+        svn_branch__eid_t to_parent_eid,
+        const char *new_name,
+        apr_pool_t *scratch_pool)
+{
+  const char *from_branch_id = svn_branch__get_id(from_el_rev->branch,
+                                                  scratch_pool);
+  svn_branch__rev_bid_eid_t *src_el_rev
+    = svn_branch__rev_bid_eid_create(from_el_rev->rev, from_branch_id,
+                                     from_el_rev->eid, scratch_pool);
+  const char *from_path = el_rev_id_to_path(from_el_rev, scratch_pool);
+  const char *to_path = branch_peid_name_to_path(to_branch, to_parent_eid,
+                                                 new_name, scratch_pool);
+
+  SVN_ERR(svn_branch__state_copy_tree(to_branch,
+                                      src_el_rev, to_parent_eid, new_name,
+                                      scratch_pool));
+  svnmover_notify_v("A+   %s (from %s)",
+                    to_path, from_path);
+
+  return SVN_NO_ERROR;
+}
+
+/* Delete the element EID from BRANCH, and notify ("D") in verbose mode.
+ * (The path is resolved before the deletion so it can still be shown.)
+ */
+static svn_error_t *
+do_delete(svn_branch__state_t *branch,
+          svn_branch__eid_t eid,
+          apr_pool_t *scratch_pool)
+{
+  const char *path = svn_branch__get_rrpath_by_eid(branch, eid, scratch_pool);
+
+  SVN_ERR(svn_branch__state_delete_one(branch, eid, scratch_pool));
+  svnmover_notify_v("D    %s", path);
+  return SVN_NO_ERROR;
+}
+
+/* Create a new directory element (with no properties) in TO_BRANCH, as a
+ * child of TO_PARENT_EID named NEW_NAME, assigning it a new eid from TXN.
+ * Notify ("A") in verbose mode.
+ */
+static svn_error_t *
+do_mkdir(svn_branch__txn_t *txn,
+         svn_branch__state_t *to_branch,
+         svn_branch__eid_t to_parent_eid,
+         const char *new_name,
+         apr_pool_t *scratch_pool)
+{
+  apr_hash_t *props = apr_hash_make(scratch_pool);
+  svn_element__payload_t *payload
+    = svn_element__payload_create_dir(props, scratch_pool);
+  int new_eid;
+  const char *path = branch_peid_name_to_path(to_branch, to_parent_eid,
+                                              new_name, scratch_pool);
+
+  SVN_ERR(svn_branch__txn_new_eid(txn, &new_eid, scratch_pool));
+  SVN_ERR(svn_branch__state_alter_one(to_branch, new_eid,
+                                      to_parent_eid, new_name, payload,
+                                      scratch_pool));
+  svnmover_notify_v("A    %s",
+                    path);
+  return SVN_NO_ERROR;
+}
+
+/* Set the text of a file element from a local file or from stdin.
+ *
+ * Read the new text from LOCAL_FILE_PATH, or from standard input if
+ * LOCAL_FILE_PATH is "-".
+ *
+ * If FILE_EL_REV->eid is not -1, alter that existing element, keeping
+ * its existing properties; notify "M".  Otherwise create a new element
+ * named FILE_NAME under PARENT_EL_REV, with empty properties, set
+ * FILE_EL_REV->eid to the new element's id, and notify "A".  A file that
+ * is its branch's root element keeps its root position (parent -1,
+ * empty name).
+ */
+static svn_error_t *
+do_put_file(svn_branch__txn_t *txn,
+            const char *local_file_path,
+            svn_branch__el_rev_id_t *file_el_rev,
+            svn_branch__el_rev_id_t *parent_el_rev,
+            const char *file_name,
+            apr_pool_t *scratch_pool)
+{
+  apr_hash_t *props;
+  svn_stringbuf_t *text;
+  int parent_eid;
+  const char *name;
+  svn_element__payload_t *payload;
+
+  if (file_el_rev->eid != -1)
+    {
+      /* get existing props */
+      svn_element__content_t *existing_element;
+
+      SVN_ERR(svn_branch__state_get_element(file_el_rev->branch,
+                                            &existing_element,
+                                            file_el_rev->eid, scratch_pool));
+      props = existing_element->payload->props;
+    }
+  else
+    {
+      props = apr_hash_make(scratch_pool);
+    }
+  /* read new text from file */
+  {
+    svn_stream_t *src;
+
+    if (strcmp(local_file_path, "-") != 0)
+      SVN_ERR(svn_stream_open_readonly(&src, local_file_path,
+                                       scratch_pool, scratch_pool));
+    else
+      SVN_ERR(svn_stream_for_stdin2(&src, FALSE, scratch_pool));
+
+    /* Fix: svn_stringbuf_from_stream() returns an svn_error_t * that was
+       previously discarded; wrap it in SVN_ERR so read failures (missing
+       file data, stream errors) propagate instead of leaving TEXT
+       undefined. */
+    SVN_ERR(svn_stringbuf_from_stream(&text, src, 0, scratch_pool));
+  }
+  payload = svn_element__payload_create_file(props, text, scratch_pool);
+
+  if (is_branch_root_element(file_el_rev->branch,
+                             file_el_rev->eid))
+    {
+      parent_eid = -1;
+      name = "";
+    }
+  else
+    {
+      parent_eid = parent_el_rev->eid;
+      name = file_name;
+    }
+
+  if (file_el_rev->eid != -1)
+    {
+      /* Alter the existing element in place. */
+      const char *path = el_rev_id_to_path(file_el_rev, scratch_pool);
+
+      SVN_ERR(svn_branch__state_alter_one(file_el_rev->branch, file_el_rev->eid,
+                                          parent_eid, name, payload,
+                                          scratch_pool));
+      svnmover_notify_v("M    %s",
+                        path);
+    }
+  else
+    {
+      /* Create a new element with a fresh eid. */
+      int new_eid;
+      const char *path
+        = branch_peid_name_to_path(parent_el_rev->branch, parent_eid, name,
+                                   scratch_pool);
+
+      SVN_ERR(svn_branch__txn_new_eid(txn, &new_eid, scratch_pool));
+      SVN_ERR(svn_branch__state_alter_one(parent_el_rev->branch, new_eid,
+                                          parent_eid, name, payload,
+                                          scratch_pool));
+      file_el_rev->eid = new_eid;
+      svnmover_notify_v("A    %s",
+                        path);
+    }
+  return SVN_NO_ERROR;
+}
+
+/* Print the properties and then the text of the file element at
+ * FILE_EL_REV.  (If the element has no text, only properties are shown.)
+ */
+static svn_error_t *
+do_cat(svn_branch__el_rev_id_t *file_el_rev,
+       apr_pool_t *scratch_pool)
+{
+  apr_hash_t *props;
+  svn_stringbuf_t *text;
+  svn_element__content_t *existing_element;
+  apr_hash_index_t *hi;
+
+  /* get existing props */
+  SVN_ERR(svn_branch__state_get_element(file_el_rev->branch, &existing_element,
+                                        file_el_rev->eid, scratch_pool));
+
+  props = existing_element->payload->props;
+  text = existing_element->payload->text;
+
+  for (hi = apr_hash_first(scratch_pool, props); hi; hi = apr_hash_next(hi))
+    {
+      const char *pname = apr_hash_this_key(hi);
+      svn_string_t *pval = apr_hash_this_val(hi);
+
+      svnmover_notify("property '%s': '%s'", pname, pval->data);
+    }
+  if (text)
+    {
+      svnmover_notify("%s", text->data);
+    }
+  return SVN_NO_ERROR;
+}
+
+/* Find the main parent of branch-state BRANCH. That means:
+ *   - the only parent (in the case of straight history or branching), else
+ *   - the parent with the same branch id (in the case of normal merging), else
+ *   - none (in the case of a new unrelated branch, or a new branch formed
+ *     by merging two or more other branches).
+ *
+ * Set *PREDECESSOR_P (if PREDECESSOR_P is non-null) to the main parent,
+ * or to NULL if there is none, allocated in RESULT_POOL.
+ */
+static svn_error_t *
+find_branch_main_parent(svn_branch__state_t *branch,
+                        svn_branch__rev_bid_t **predecessor_p,
+                        apr_pool_t *result_pool)
+{
+  svn_branch__history_t *history;
+  svn_branch__rev_bid_t *our_own_history;
+  svn_branch__rev_bid_t *predecessor = NULL;
+
+  SVN_ERR(svn_branch__state_get_history(branch, &history, result_pool));
+  if (apr_hash_count(history->parents) == 1)
+    {
+      /* Exactly one parent: that is the main parent. */
+      apr_hash_index_t *hi = apr_hash_first(result_pool, history->parents);
+
+      predecessor = apr_hash_this_val(hi);
+    }
+  else if ((our_own_history = svn_hash_gets(history->parents, branch->bid)))
+    {
+      /* Several parents: prefer the one in our own branch line. */
+      predecessor = our_own_history;
+    }
+
+  if (predecessor_p)
+    *predecessor_p = predecessor;
+  return SVN_NO_ERROR;
+}
+
+/* Set *NEW_EL_REV_P to the location where OLD_EL_REV was in the previous
+ * revision. Follow the "main line" of any branching in its history, as
+ * determined by find_branch_main_parent().
+ *
+ * The element id is assumed to be the same in the predecessor branch.
+ * If the branch has no main parent, set *NEW_EL_REV_P to NULL.
+ */
+static svn_error_t *
+svn_branch__find_predecessor_el_rev(svn_branch__el_rev_id_t **new_el_rev_p,
+                                    svn_branch__el_rev_id_t *old_el_rev,
+                                    apr_pool_t *result_pool)
+{
+  const svn_branch__repos_t *repos = old_el_rev->branch->txn->repos;
+  svn_branch__rev_bid_t *predecessor;
+  svn_branch__state_t *branch;
+
+  SVN_ERR(find_branch_main_parent(old_el_rev->branch,
+                                  &predecessor, result_pool));
+  if (! predecessor)
+    {
+      *new_el_rev_p = NULL;
+      return SVN_NO_ERROR;
+    }
+
+  SVN_ERR(svn_branch__repos_get_branch_by_id(&branch,
+                                             repos, predecessor->rev,
+                                             predecessor->bid, result_pool));
+  *new_el_rev_p = svn_branch__el_rev_id_create(branch, old_el_rev->eid,
+                                               predecessor->rev, result_pool);
+
+  return SVN_NO_ERROR;
+}
+
+/* Similar to 'svn log -v', this iterates over the revisions between
+ * LEFT and RIGHT (currently excluding LEFT), printing a single-rev diff
+ * for each.
+ *
+ * Walks backwards from RIGHT towards LEFT->rev, at each step diffing a
+ * revision against its main-line predecessor (found by
+ * svn_branch__find_predecessor_el_rev()).
+ */
+static svn_error_t *
+do_log(svn_branch__el_rev_id_t *left,
+       svn_branch__el_rev_id_t *right,
+       apr_pool_t *scratch_pool)
+{
+  svn_revnum_t first_rev = left->rev;
+
+  while (right->rev > first_rev)
+    {
+      svn_branch__el_rev_id_t *el_rev_left;
+
+      SVN_ERR(svn_branch__find_predecessor_el_rev(&el_rev_left, right,
+                                                  scratch_pool));
+
+      svnmover_notify(SVN_CL__LOG_SEP_STRING "r%ld | ...",
+                      right->rev);
+      svnmover_notify("History:");
+      SVN_ERR(show_history_r(right->branch, " ", scratch_pool));
+      svnmover_notify("Changed elements:");
+      SVN_ERR(branch_diff_r(el_rev_left, right,
+                            show_subtree_diff, " ",
+                            scratch_pool));
+      /* Step back one revision along the main line. */
+      right = el_rev_left;
+    }
+
+  return SVN_NO_ERROR;
+}
+
+/* Make a subbranch at OUTER_BRANCH : OUTER_PARENT_EID : OUTER_NAME.
+ *
+ * The subbranch will consist of a single element given by PAYLOAD.
+ *
+ * Two new eids are allocated from TXN: one for the subbranch-root element
+ * in OUTER_BRANCH, and one for the new branch's own root element.
+ * If NEW_BRANCH_ID_P is non-null, set *NEW_BRANCH_ID_P to the new
+ * branch's id.  Notify ("A ... (branch ...)") in verbose mode.
+ */
+static svn_error_t *
+do_mkbranch(const char **new_branch_id_p,
+            svn_branch__txn_t *txn,
+            svn_branch__state_t *outer_branch,
+            int outer_parent_eid,
+            const char *outer_name,
+            svn_element__payload_t *payload,
+            apr_pool_t *scratch_pool)
+{
+  const char *outer_branch_id = svn_branch__get_id(outer_branch, scratch_pool);
+  int new_outer_eid, new_inner_eid;
+  const char *new_branch_id;
+  svn_branch__state_t *new_branch;
+  const char *path = branch_peid_name_to_path(outer_branch, outer_parent_eid,
+                                              outer_name, scratch_pool);
+
+  /* Create the subbranch-root element in the outer branch. */
+  SVN_ERR(svn_branch__txn_new_eid(txn, &new_outer_eid, scratch_pool));
+  SVN_ERR(svn_branch__state_alter_one(outer_branch, new_outer_eid,
+                                      outer_parent_eid, outer_name,
+                                      svn_element__payload_create_subbranch(
+                                        scratch_pool), scratch_pool));
+
+  /* Open the new nested branch and give it a single root element. */
+  SVN_ERR(svn_branch__txn_new_eid(txn, &new_inner_eid, scratch_pool));
+  new_branch_id = svn_branch__id_nest(outer_branch_id, new_outer_eid,
+                                      scratch_pool);
+  SVN_ERR(svn_branch__txn_open_branch(txn, &new_branch,
+                                      new_branch_id, new_inner_eid,
+                                      NULL /*tree_ref*/,
+                                      scratch_pool, scratch_pool));
+  SVN_ERR(svn_branch__state_alter_one(new_branch, new_inner_eid,
+                                      -1, "", payload, scratch_pool));
+
+  svnmover_notify_v("A    %s (branch %s)",
+                    path,
+                    new_branch->bid);
+  if (new_branch_id_p)
+    *new_branch_id_p = new_branch->bid;
+  return SVN_NO_ERROR;
+}
+
+/* Branch all or part of an existing branch, making a new branch.
+ *
+ * Branch the subtree of FROM_BRANCH found at FROM_EID, to create
+ * a new branch at TO_OUTER_BRANCH:TO_OUTER_PARENT_EID:NEW_NAME.
+ *
+ * FROM_BRANCH:FROM_EID must be an existing element. It may be the
+ * root of FROM_BRANCH. It must not be the root of a subbranch of
+ * FROM_BRANCH.
+ *
+ * TO_OUTER_BRANCH:TO_OUTER_PARENT_EID must be an existing directory
+ * and NEW_NAME must be nonexistent in that directory.
+ *
+ * The new branch records FROM as its (single) history parent.  If
+ * NEW_BRANCH_P is non-null, set *NEW_BRANCH_P to the new branch state,
+ * allocated in RESULT_POOL.
+ */
+static svn_error_t *
+do_branch(svn_branch__state_t **new_branch_p,
+          svn_branch__txn_t *txn,
+          svn_branch__rev_bid_eid_t *from,
+          svn_branch__state_t *to_outer_branch,
+          svn_branch__eid_t to_outer_parent_eid,
+          const char *new_name,
+          apr_pool_t *result_pool,
+          apr_pool_t *scratch_pool)
+{
+  const char *to_outer_branch_id
+    = to_outer_branch ? svn_branch__get_id(to_outer_branch, scratch_pool) : NULL;
+  int to_outer_eid;
+  const char *new_branch_id;
+  svn_branch__state_t *new_branch;
+  svn_branch__history_t *history;
+  const char *to_path
+    = branch_peid_name_to_path(to_outer_branch,
+                               to_outer_parent_eid, new_name, scratch_pool);
+
+  /* assign new eid to root element (outer branch) */
+  SVN_ERR(svn_branch__txn_new_eid(txn, &to_outer_eid, scratch_pool));
+
+  new_branch_id = svn_branch__id_nest(to_outer_branch_id, to_outer_eid,
+                                      scratch_pool);
+  SVN_ERR(svn_branch__txn_open_branch(txn, &new_branch,
+                                      new_branch_id, from->eid, from,
+                                      result_pool, scratch_pool));
+  /* Record FROM as the new branch's sole history parent. */
+  history = svn_branch__history_create_empty(scratch_pool);
+  SVN_ERR(svn_branch__history_add_parent(history, from->rev, from->bid,
+                                         scratch_pool));
+  SVN_ERR(svn_branch__state_set_history(new_branch, history, scratch_pool));
+  /* Create the subbranch-root element in the outer branch. */
+  SVN_ERR(svn_branch__state_alter_one(to_outer_branch, to_outer_eid,
+                                      to_outer_parent_eid, new_name,
+                                      svn_element__payload_create_subbranch(
+                                        scratch_pool), scratch_pool));
+
+  svnmover_notify_v("A+   %s (branch %s)",
+                    to_path,
+                    new_branch->bid);
+
+  if (new_branch_p)
+    *new_branch_p = new_branch;
+  return SVN_NO_ERROR;
+}
+
+/* Branch the subtree given by FROM to create a new top-level branch
+ * (one with no outer branch), assigning it a new eid from TXN.
+ *
+ * If NEW_BRANCH_P is non-null, set *NEW_BRANCH_P to the new branch
+ * state, allocated in RESULT_POOL.  Notify in verbose mode.
+ */
+static svn_error_t *
+do_topbranch(svn_branch__state_t **new_branch_p,
+             svn_branch__txn_t *txn,
+             svn_branch__rev_bid_eid_t *from,
+             apr_pool_t *result_pool,
+             apr_pool_t *scratch_pool)
+{
+  int outer_eid;
+  const char *new_branch_id;
+  svn_branch__state_t *new_branch;
+
+  SVN_ERR(svn_branch__txn_new_eid(txn, &outer_eid, scratch_pool));
+  new_branch_id = svn_branch__id_nest(NULL /*outer_branch*/, outer_eid,
+                                      scratch_pool);
+  SVN_ERR(svn_branch__txn_open_branch(txn, &new_branch,
+                                      new_branch_id, from->eid, from,
+                                      result_pool, scratch_pool));
+
+  svnmover_notify_v("A+   (branch %s)",
+                    new_branch->bid);
+
+  if (new_branch_p)
+    *new_branch_p = new_branch;
+  return SVN_NO_ERROR;
+}
+
+/* Branch the subtree of FROM_BRANCH found at FROM_EID, to appear
+ * in the existing branch TO_BRANCH at TO_PARENT_EID:NEW_NAME.
+ *
+ * This is like merging the creation of the source subtree into TO_BRANCH.
+ *
+ * Any elements of the source subtree that already exist in TO_BRANCH
+ * are altered. This is like resolving any merge conflicts as 'theirs'.
+ *
+ * (### Sometimes the user might prefer that we throw an error if any
+ * element of the source subtree already exists in TO_BRANCH.)
+ *
+ * Return an error if FROM_BRANCH:FROM_EID does not exist.
+ */
+static svn_error_t *
+do_branch_into(svn_branch__state_t *from_branch,
+               int from_eid,
+               svn_branch__state_t *to_branch,
+               svn_branch__eid_t to_parent_eid,
+               const char *new_name,
+               apr_pool_t *scratch_pool)
+{
+  svn_branch__subtree_t *from_subtree;
+  svn_element__content_t *new_root_content;
+  const char *to_path = branch_peid_name_to_path(to_branch, to_parent_eid,
+                                                 new_name, scratch_pool);
+
+  /* Source element must exist */
+  if (! svn_branch__get_path_by_eid(from_branch, from_eid, scratch_pool))
+    {
+      return svn_error_createf(SVN_BRANCH__ERR, NULL,
+                               _("Cannot branch from %s e%d: "
+                                 "does not exist"),
+                               svn_branch__get_id(
+                                 from_branch, scratch_pool), from_eid);
+    }
+
+  SVN_ERR(svn_branch__get_subtree(from_branch, &from_subtree, from_eid,
+                                  scratch_pool));
+
+  /* Change this subtree's root element to TO_PARENT_EID/NEW_NAME. */
+  new_root_content
+    = svn_element__tree_get(from_subtree->tree, from_subtree->tree->root_eid);
+  new_root_content
+    = svn_element__content_create(to_parent_eid, new_name,
+                                  new_root_content->payload, scratch_pool);
+  svn_element__tree_set(from_subtree->tree, from_subtree->tree->root_eid,
+                        new_root_content);
+
+  /* Populate the new branch mapping */
+  SVN_ERR(svn_branch__instantiate_elements_r(to_branch, *from_subtree,
+                                             scratch_pool));
+  svnmover_notify_v("A+   %s (subtree)",
+                    to_path);
+
+  return SVN_NO_ERROR;
+}
+
+/* Copy-and-delete.
+ *
+ *      copy the subtree at EL_REV to TO_BRANCH:TO_PARENT_EID:TO_NAME
+ *      delete the subtree at EL_REV
+ *
+ * EL_REV must not be a branch-root element (asserted).
+ */
+static svn_error_t *
+do_copy_and_delete(svn_branch__el_rev_id_t *el_rev,
+                   svn_branch__state_t *to_branch,
+                   int to_parent_eid,
+                   const char *to_name,
+                   apr_pool_t *scratch_pool)
+{
+  const char *from_path
+    = svn_branch__get_rrpath_by_eid(el_rev->branch, el_rev->eid, scratch_pool);
+
+  SVN_ERR_ASSERT(! is_branch_root_element(el_rev->branch, el_rev->eid));
+
+  SVN_ERR(do_copy(el_rev, to_branch, to_parent_eid, to_name,
+                  scratch_pool));
+
+  SVN_ERR(svn_branch__state_delete_one(el_rev->branch, el_rev->eid,
+                                       scratch_pool));
+  svnmover_notify_v("D    %s", from_path);
+
+  return SVN_NO_ERROR;
+}
+
+/* Branch-and-delete.
+ *
+ *      branch the subtree at EL_REV creating a new nested branch at
+ *        TO_BRANCH:TO_PARENT_EID:TO_NAME,
+ *        or creating a new top-level branch if TO_BRANCH is null;
+ *      delete the subtree at EL_REV
+ *
+ * EL_REV must not be a branch-root element (asserted).
+ */
+static svn_error_t *
+do_branch_and_delete(svn_branch__txn_t *edit_txn,
+                     svn_branch__el_rev_id_t *el_rev,
+                     svn_branch__state_t *to_outer_branch,
+                     int to_outer_parent_eid,
+                     const char *to_name,
+                     apr_pool_t *scratch_pool)
+{
+  const char *from_branch_id = svn_branch__get_id(el_rev->branch,
+                                                  scratch_pool);
+  svn_branch__rev_bid_eid_t *from
+    = svn_branch__rev_bid_eid_create(el_rev->rev, from_branch_id,
+                                     el_rev->eid, scratch_pool);
+  svn_branch__state_t *new_branch;
+  const char *from_path
+    = svn_branch__get_rrpath_by_eid(el_rev->branch, el_rev->eid, scratch_pool);
+
+  SVN_ERR_ASSERT(! is_branch_root_element(el_rev->branch, el_rev->eid));
+
+  SVN_ERR(do_branch(&new_branch, edit_txn, from,
+                    to_outer_branch, to_outer_parent_eid, to_name,
+                    scratch_pool, scratch_pool));
+
+  SVN_ERR(svn_branch__state_delete_one(el_rev->branch, el_rev->eid,
+                                       scratch_pool));
+  svnmover_notify_v("D    %s", from_path);
+
+  return SVN_NO_ERROR;
+}
+
+/* Branch-into-and-delete.
+ *
+ * (Previously, confusingly, called 'branch-and-delete'.)
+ *
+ * The target branch is different from the source branch.
+ *
+ *      delete elements from source branch
+ *      instantiate (or update) same elements in target branch
+ *
+ * For each element being moved, if the element already exists in TO_BRANCH,
+ * the effect is as if the existing element in TO_BRANCH was first deleted.
+ *
+ * EL_REV must not be a branch-root element, and TO_BRANCH must be a
+ * different branch from EL_REV's branch (both asserted).
+ */
+static svn_error_t *
+do_branch_into_and_delete(svn_branch__el_rev_id_t *el_rev,
+                          svn_branch__state_t *to_branch,
+                          int to_parent_eid,
+                          const char *to_name,
+                          apr_pool_t *scratch_pool)
+{
+  const char *from_path
+    = svn_branch__get_rrpath_by_eid(el_rev->branch, el_rev->eid, scratch_pool);
+
+  SVN_ERR_ASSERT(! is_branch_root_element(el_rev->branch, el_rev->eid));
+
+  /* This is supposed to be used for moving to a *different* branch.
+     In fact, this method would also work for moving within one
+     branch, but we don't currently want to use it for that purpose. */
+  SVN_ERR_ASSERT(! BRANCH_IS_SAME_BRANCH(el_rev->branch, to_branch,
+                                         scratch_pool));
+
+  /* Merge the "creation of the source" to the target (aka branch-into) */
+  SVN_ERR(do_branch_into(el_rev->branch, el_rev->eid,
+                         to_branch, to_parent_eid, to_name,
+                         scratch_pool));
+
+  SVN_ERR(svn_branch__state_delete_one(el_rev->branch, el_rev->eid,
+                                       scratch_pool));
+  svnmover_notify_v("D    %s", from_path);
+
+  return SVN_NO_ERROR;
+}
+
+/* Interactive options for moving to another branch.
+ *
+ * Prompt the user to choose one of copy-and-delete ('c'),
+ * branch-and-delete ('b') or branch-into-and-delete ('i') for moving the
+ * subtree at EL_REV to TO_PARENT_EL_REV:TO_NAME, and perform the chosen
+ * action.  <enter>, cancellation or EOF at the prompt does nothing.
+ */
+static svn_error_t *
+do_interactive_cross_branch_move(svn_branch__txn_t *txn,
+                                 svn_branch__el_rev_id_t *el_rev,
+                                 svn_branch__el_rev_id_t *to_parent_el_rev,
+                                 const char *to_name,
+                                 apr_pool_t *scratch_pool)
+{
+  svn_error_t *err;
+  const char *input;
+
+  if (0 /*### if non-interactive*/)
+    {
+      return svn_error_createf(SVN_BRANCH__ERR, NULL,
+        _("mv: The source and target are in different branches. "
+          "Some ways to move content to a different branch are, "
+          "depending on the effect you want to achieve: "
+          "copy-and-delete, branch-and-delete, branch-into-and-delete"));
+    }
+
+  svnmover_notify_v(
+    _("mv: The source and target are in different branches. "
+      "Some ways to move content to a different branch are, "
+      "depending on the effect you want to achieve:\n"
+      "  c: copy-and-delete: cp SOURCE TARGET; rm SOURCE\n"
+      "  b: branch-and-delete: branch SOURCE TARGET; rm SOURCE\n"
+      "  i: branch-into-and-delete: branch-into SOURCE TARGET; rm SOURCE\n"
+      "We can do one of these for you now if you wish.\n"
+    ));
+
+  settext_stderr(TEXT_FG_YELLOW);
+  err = svn_cmdline_prompt_user2(
+          &input,
+          "Your choice (c, b, i, or just <enter> to do nothing): ",
+          NULL, scratch_pool);
+  settext(TEXT_RESET);
+  /* Treat cancellation or EOF at the prompt as "do nothing". */
+  if (err && (err->apr_err == SVN_ERR_CANCELLED || err->apr_err == APR_EOF))
+    {
+      svn_error_clear(err);
+      return SVN_NO_ERROR;
+    }
+  SVN_ERR(err);
+
+  if (input[0] == 'c' || input[0] == 'C')
+    {
+      svnmover_notify_v("Performing 'copy-and-delete SOURCE TARGET'");
+
+      SVN_ERR(do_copy_and_delete(el_rev,
+                                 to_parent_el_rev->branch,
+                                 to_parent_el_rev->eid, to_name,
+                                 scratch_pool));
+    }
+  else if (input[0] == 'b' || input[0] == 'B')
+    {
+      svnmover_notify_v("Performing 'branch-and-delete SOURCE TARGET'");
+
+      SVN_ERR(do_branch_and_delete(txn, el_rev,
+                                   to_parent_el_rev->branch,
+                                   to_parent_el_rev->eid, to_name,
+                                   scratch_pool));
+    }
+  else if (input[0] == 'i' || input[0] == 'I')
+    {
+      svnmover_notify_v("Performing 'branch-into-and-delete SOURCE TARGET'");
+      svnmover_notify_v(
+        "In the current implementation of this experimental UI, each element "
+        "instance from the source branch subtree will overwrite any instance "
+        "of the same element that already exists in the target branch."
+        );
+      /* We could instead either throw an error or fall back to copy-and-delete
+         if any moved element already exists in target branch. */
+
+      SVN_ERR(do_branch_into_and_delete(el_rev,
+                                        to_parent_el_rev->branch,
+                                        to_parent_el_rev->eid, to_name,
+                                        scratch_pool));
+    }
+
+  return SVN_NO_ERROR;
+}
+
+/* Move (within one branch).
+ *
+ * Re-parent and/or rename the element at EL_REV to
+ * TO_PARENT_EL_REV:TO_NAME, keeping its payload unchanged.
+ * Notify ("V") in verbose mode.
+ */
+static svn_error_t *
+do_move(svn_branch__el_rev_id_t *el_rev,
+        svn_branch__el_rev_id_t *to_parent_el_rev,
+        const char *to_name,
+        apr_pool_t *scratch_pool)
+{
+  const char *from_path = el_rev_id_to_path(el_rev, scratch_pool);
+  const char *to_path
+    = branch_peid_name_to_path(to_parent_el_rev->branch,
+                               to_parent_el_rev->eid, to_name, scratch_pool);
+  /* New payload shall be the same as before */
+  svn_element__content_t *existing_element;
+
+  SVN_ERR(svn_branch__state_get_element(el_rev->branch, &existing_element,
+                                        el_rev->eid, scratch_pool));
+  SVN_ERR(svn_branch__state_alter_one(el_rev->branch, el_rev->eid,
+                                      to_parent_el_rev->eid, to_name,
+                                      existing_element->payload, scratch_pool));
+  svnmover_notify_v("V    %s (from %s)",
+                    to_path, from_path);
+  return SVN_NO_ERROR;
+}
+
+/* This commit callback prints not only a commit summary line but also
+ * a log-style summary of the changes.
+ *
+ * Record the committed revision number in the commit_callback_baton_t
+ * pointed to by BATON, for the caller to use afterwards.
+ */
+static svn_error_t *
+commit_callback(const svn_commit_info_t *commit_info,
+                void *baton,
+                apr_pool_t *pool)
+{
+  commit_callback_baton_t *b = baton;
+
+  svnmover_notify("Committed r%ld:", commit_info->revision);
+
+  b->revision = commit_info->revision;
+  return SVN_NO_ERROR;
+}
+
+/* Display a diff of the commit described by CCBB: the differences
+ * between the WC base branch in the previous head revision and the
+ * committed branch in the new revision.
+ */
+static svn_error_t *
+display_diff_of_commit(const commit_callback_baton_t *ccbb,
+                       apr_pool_t *scratch_pool)
+{
+  svn_branch__txn_t *previous_head_txn
+    = svn_branch__repos_get_base_revision_root(ccbb->edit_txn);
+  svn_branch__state_t *base_branch
+    = svn_branch__txn_get_branch_by_id(previous_head_txn,
+                                       ccbb->wc_base_branch_id,
+                                       scratch_pool);
+  svn_branch__state_t *committed_branch
+    = svn_branch__txn_get_branch_by_id(ccbb->edit_txn,
+                                       ccbb->wc_commit_branch_id,
+                                       scratch_pool);
+  svn_branch__el_rev_id_t *el_rev_left
+    = svn_branch__el_rev_id_create(base_branch, svn_branch__root_eid(base_branch),
+                                   base_branch->txn->rev,
+                                   scratch_pool);
+  svn_branch__el_rev_id_t *el_rev_right
+    = svn_branch__el_rev_id_create(committed_branch,
+                                   svn_branch__root_eid(committed_branch),
+                                   committed_branch->txn->rev,
+                                   scratch_pool);
+
+  SVN_ERR(branch_diff_r(el_rev_left, el_rev_right,
+                        show_subtree_diff, " ",
+                        scratch_pool));
+  return SVN_NO_ERROR;
+}
+
+/* Commit the WC working state, setting *NEW_REV_P to the new revision.
+ *
+ * Error out if there are unresolved conflicts.  Otherwise complete the
+ * current edit drive with a sequence point and delegate to wc_commit().
+ */
+static svn_error_t *
+commit(svn_revnum_t *new_rev_p,
+       svnmover_wc_t *wc,
+       apr_hash_t *revprops,
+       apr_pool_t *scratch_pool)
+{
+  if (svnmover_any_conflicts(wc->conflicts))
+    {
+      return svn_error_createf(SVN_BRANCH__ERR, NULL,
+                               _("Cannot commit because there are "
+                                 "unresolved conflicts"));
+    }
+
+  /* Complete the old edit drive (editing the WC working state) */
+  SVN_ERR(svn_branch__txn_sequence_point(wc->edit_txn, scratch_pool));
+
+  /* Just as in execute() the pool must be a subpool of wc->pool. */
+  SVN_ERR(wc_commit(new_rev_p, wc, revprops, wc->pool));
+
+  return SVN_NO_ERROR;
+}
+
+/* Commit.
+ *
+ * Set *NEW_REV_P to the committed revision number. Update the WC base of
+ * each committed element to that revision.
+ *
+ * If there are no changes to commit, set *NEW_REV_P to SVN_INVALID_REVNUM
+ * and do not make a commit.
+ *
+ * NEW_REV_P may be null if not wanted.
+ */
+static svn_error_t *
+do_commit(svn_revnum_t *new_rev_p,
+          svnmover_wc_t *wc,
+          apr_hash_t *revprops,
+          apr_pool_t *scratch_pool)
+{
+  svn_revnum_t new_rev;
+
+  SVN_ERR(commit(&new_rev, wc, revprops, scratch_pool));
+
+  if (new_rev_p)
+    *new_rev_p = new_rev;
+  return SVN_NO_ERROR;
+}
+
+/* Revert all uncommitted changes in WC, and clear any recorded conflicts.
+ */
+static svn_error_t *
+do_revert(svnmover_wc_t *wc,
+          apr_pool_t *scratch_pool)
+{
+  /* Replay the inverse of the current edit txn, into the current edit txn */
+  SVN_ERR(replay(wc->edit_txn, wc->working->branch,
+                 wc->working->branch,
+                 wc->base->branch,
+                 scratch_pool));
+  wc->conflicts = NULL;
+
+  return SVN_NO_ERROR;
+}
+
+/* Migration replay baton: state shared by the replay callbacks used by
+ * do_migrate(). */
+typedef struct migrate_replay_baton_t {
+  /* The branching transaction being populated by the migration. */
+  svn_branch__txn_t *edit_txn;
+  /* RA session from which the old revisions are replayed. */
+  svn_ra_session_t *from_session;
+  /* Hash (by revnum) of array of svn_repos_move_info_t. */
+  apr_hash_t *moves;
+} migrate_replay_baton_t;
+
+/* Callback function for svn_ra_replay_range, invoked when starting to parse
+ * a replay report.
+ *
+ * Return in *EDITOR/*EDIT_BATON a migration editor for REVISION (wrapped
+ * in a debug editor that prefixes its output with "migrate: ").
+ */
+static svn_error_t *
+migrate_replay_rev_started(svn_revnum_t revision,
+                           void *replay_baton,
+                           const svn_delta_editor_t **editor,
+                           void **edit_baton,
+                           apr_hash_t *rev_props,
+                           apr_pool_t *pool)
+{
+  migrate_replay_baton_t *rb = replay_baton;
+  const svn_delta_editor_t *old_editor;
+  void *old_edit_baton;
+
+  svnmover_notify("migrate: start r%ld", revision);
+
+  SVN_ERR(svn_branch__compat_get_migration_editor(&old_editor, &old_edit_baton,
+                                                  rb->edit_txn,
+                                                  rb->from_session, revision,
+                                                  pool));
+  SVN_ERR(svn_delta__get_debug_editor(&old_editor, &old_edit_baton,
+                                      old_editor, old_edit_baton,
+                                      "migrate: ", pool));
+
+  *editor = old_editor;
+  *edit_baton = old_edit_baton;
+
+  return SVN_NO_ERROR;
+}
+
+/* Callback function for svn_ra_replay_range, invoked when finishing parsing
+ * a replay report.
+ *
+ * Close the edit for REVISION, then print any move chains recorded for
+ * that revision in the baton's 'moves' hash.
+ */
+static svn_error_t *
+migrate_replay_rev_finished(svn_revnum_t revision,
+                            void *replay_baton,
+                            const svn_delta_editor_t *editor,
+                            void *edit_baton,
+                            apr_hash_t *rev_props,
+                            apr_pool_t *pool)
+{
+  migrate_replay_baton_t *rb = replay_baton;
+  apr_array_header_t *moves_in_revision
+    = apr_hash_get(rb->moves, &revision, sizeof(revision));
+
+  SVN_ERR(editor->close_edit(edit_baton, pool));
+
+  svnmover_notify("migrate: moves in revision r%ld:", revision);
+
+  if (moves_in_revision)
+    {
+      int i;
+
+      for (i = 0; i < moves_in_revision->nelts; i++)
+        {
+          svn_repos_move_info_t *this_move
+            = APR_ARRAY_IDX(moves_in_revision, i, void *);
+
+          if (this_move)
+            {
+              svnmover_notify("%s",
+                              svn_client__format_move_chain_for_display(
+                                this_move, "", pool));
+            }
+        }
+    }
+
+  return SVN_NO_ERROR;
+}
+
+/* Migrate changes from non-move-tracking revisions.
+ *
+ * Replay revisions START_REVISION to END_REVISION (inclusive) from the
+ * WC's RA session into the WC's edit txn, after scanning the repository
+ * log for move information.  Error out if the range is not within
+ * [1, head].
+ */
+static svn_error_t *
+do_migrate(svnmover_wc_t *wc,
+           svn_revnum_t start_revision,
+           svn_revnum_t end_revision,
+           apr_pool_t *scratch_pool)
+{
+  migrate_replay_baton_t *rb = apr_pcalloc(scratch_pool, sizeof(*rb));
+
+  if (start_revision < 1 || end_revision < 1
+      || start_revision > end_revision
+      || end_revision > wc->head_revision)
+    {
+      return svn_error_createf(SVN_ERR_INCORRECT_PARAMS, NULL,
+                               _("migrate: Bad revision range (%ld to %ld); "
+                                 "minimum is 1 and maximum (head) is %ld"),
+                               start_revision, end_revision,
+                               wc->head_revision);
+    }
+
+  /* Scan the repository log for move info */
+  SVN_ERR(svn_client__get_repos_moves(&rb->moves,
+                                      "" /*(unused)*/,
+                                      wc->ra_session,
+                                      start_revision, end_revision,
+                                      wc->ctx, scratch_pool, scratch_pool));
+
+  rb->edit_txn = wc->edit_txn;
+  rb->from_session = wc->ra_session;
+  SVN_ERR(svn_ra_replay_range(rb->from_session,
+                              start_revision, end_revision,
+                              0, TRUE,
+                              migrate_replay_rev_started,
+                              migrate_replay_rev_finished,
+                              rb, scratch_pool));
+  return SVN_NO_ERROR;
+}
+
+/* Print BRANCH's history: its main parent (marking whether it is a
+ * continuation of the same branch line or a branch point), followed by
+ * any other (merge) parents.
+ */
+static svn_error_t *
+show_branch_history(svn_branch__state_t *branch,
+                    apr_pool_t *scratch_pool)
+{
+  svn_branch__history_t *history;
+  svn_branch__rev_bid_t *main_parent;
+  apr_hash_index_t *hi;
+
+  SVN_ERR(svn_branch__state_get_history(branch, &history, scratch_pool));
+
+  SVN_ERR(find_branch_main_parent(branch, &main_parent, scratch_pool));
+  if (main_parent)
+    {
+      if (strcmp(main_parent->bid, branch->bid) == 0)
+        {
+          svnmover_notify("  main parent: r%ld.%s",
+                          main_parent->rev, main_parent->bid);
+        }
+      else
+        {
+          svnmover_notify("  main parent (branched from): r%ld.%s",
+                          main_parent->rev, main_parent->bid);
+        }
+    }
+  for (hi = apr_hash_first(scratch_pool, history->parents);
+       hi; hi = apr_hash_next(hi))
+    {
+      svn_branch__rev_bid_t *parent = apr_hash_this_val(hi);
+
+      if (! svn_branch__rev_bid_equal(parent, main_parent))
+        {
+          svnmover_notify("  other parent (complete merge): r%ld.%s",
+                          parent->rev, parent->bid);
+        }
+    }
+
+  return SVN_NO_ERROR;
+}
+
+/* Show info about element E.
+ *
+ * For a WC working element (E->rev is SVN_INVALID_REVNUM): show its base
+ * revision and branch, working branch, and whether it is modified
+ * relative to the base.  For a repo element: show its revision and branch.
+ *
+ * TODO: Show different info for a repo element versus a WC element.
+ */
+static svn_error_t *
+do_info(svnmover_wc_t *wc,
+        svn_branch__el_rev_id_t *e,
+        apr_pool_t *scratch_pool)
+{
+  svnmover_notify("Element Id: %d%s",
+                  e->eid,
+                  is_branch_root_element(e->branch, e->eid)
+                    ? " (branch root)" : "");
+
+  /* Show WC info for a WC working element, or repo info for a repo element */
+  if (e->rev == SVN_INVALID_REVNUM)
+    {
+      svn_branch__state_t *base_branch, *work_branch;
+      svn_revnum_t base_rev;
+      svn_element__content_t *e_base, *e_work;
+      svn_boolean_t is_modified;
+
+      base_branch = svn_branch__txn_get_branch_by_id(
+                      wc->base->branch->txn, e->branch->bid, scratch_pool);
+      work_branch = svn_branch__txn_get_branch_by_id(
+                      wc->working->branch->txn, e->branch->bid, scratch_pool);
+      base_rev = svnmover_wc_get_base_rev(wc, base_branch, e->eid, scratch_pool);
+      SVN_ERR(svn_branch__state_get_element(base_branch, &e_base,
+                                            e->eid, scratch_pool));
+      SVN_ERR(svn_branch__state_get_element(work_branch, &e_work,
+                                            e->eid, scratch_pool));
+      is_modified = !svn_element__content_equal(e_base, e_work,
+                                                scratch_pool);
+
+      svnmover_notify("Base Revision: %ld", base_rev);
+      svnmover_notify("Base Branch:    %s", base_branch->bid);
+      svnmover_notify("Working Branch: %s", work_branch->bid);
+      svnmover_notify("Modified:       %s", is_modified ? "yes" : "no");
+    }
+  else
+    {
+      svnmover_notify("Revision: %ld", e->rev);
+      svnmover_notify("Branch:    %s", e->branch->bid);
+    }
+
+  return SVN_NO_ERROR;
+}
+
+
+/* A parsed command argument: the element-revision it refers to and that
+ * of its parent directory.
+ * NOTE(review): path_name is presumably the final path component of the
+ * argument's path — confirm against the argument-parsing code. */
+typedef struct arg_t
+{
+  const char *path_name;
+  svn_branch__el_rev_id_t *el_rev, *parent_el_rev;
+} arg_t;
+
+/* Argument-validation macros.  Each expands in a context where the local
+ * variables 'arg' (array of arg_t *) and 'action' are in scope, and
+ * returns an error from the enclosing function on failure. */
+
+/* Error out unless a revision number was specified for argument I. */
+#define VERIFY_REV_SPECIFIED(op, i)                                     \
+  if (arg[i]->el_rev->rev == SVN_INVALID_REVNUM)                        \
+    return svn_error_createf(SVN_BRANCH__ERR, NULL,                     \
+                             _("%s: '%s': revision number required"),   \
+                             op, action->relpath[i]);
+
+/* Error out if a revision number was specified for argument I. */
+#define VERIFY_REV_UNSPECIFIED(op, i)                                   \
+  if (arg[i]->el_rev->rev != SVN_INVALID_REVNUM)                        \
+    return svn_error_createf(SVN_BRANCH__ERR, NULL,                     \
+                             _("%s: '%s@...': revision number not allowed"), \
+                             op, action->relpath[i]);
+
+/* Error out if an element already exists at argument I's path. */
+#define VERIFY_EID_NONEXISTENT(op, i)                                   \
+  if (arg[i]->el_rev->eid != -1)                                        \
+    return svn_error_createf(SVN_BRANCH__ERR, NULL,                     \
+                             _("%s: Element already exists at path '%s'"), \
+                             op, action->relpath[i]);
+
+/* Error out unless an element exists at argument I's path. */
+#define VERIFY_EID_EXISTS(op, i)                                        \
+  if (arg[i]->el_rev->eid == -1)                                        \
+    return svn_error_createf(SVN_BRANCH__ERR, NULL,                     \
+                             _("%s: Element not found at path '%s%s'"), \
+                             op, action->relpath[i],                    \
+                             action->rev_spec[i].kind == svn_opt_revision_unspecified \
+                               ? "" : "@...");
+
+/* Error out unless argument I's parent directory element exists. */
+#define VERIFY_PARENT_EID_EXISTS(op, i)                                 \
+  if (arg[i]->parent_el_rev->eid == -1)                                 \
+    return svn_error_createf(SVN_BRANCH__ERR, NULL,                     \
+                             _("%s: Element not found at path '%s'"),   \
+                             op, svn_relpath_dirname(action->relpath[i], pool));
+
+/* Error out if argument J's target parent lies inside argument I's
+ * subtree (i.e. the move/copy target is nested inside the source). */
+#define VERIFY_NOT_CHILD_OF_SELF(op, i, j, pool)                        \
+  if (svn_relpath_skip_ancestor(                                        \
+        svn_branch__get_rrpath_by_eid(arg[i]->el_rev->branch,           \
+                                      arg[i]->el_rev->eid, pool),       \
+        svn_branch__get_rrpath_by_eid(arg[j]->parent_el_rev->branch,    \
+                                      arg[j]->parent_el_rev->eid, pool))) \
+    return svn_error_createf(SVN_BRANCH__ERR, NULL,                     \
+                             _("%s: The specified target is nested "    \
+                               "inside the source"), op);
+
+/* If EL_REV specifies the root element of a nested branch, change EL_REV
+ * to specify the corresponding subbranch-root element of its outer branch.
+ *
+ * If EL_REV specifies the root element of a top-level branch, return an
+ * error.
+ */
+static svn_error_t *
+point_to_outer_element_instead(svn_branch__el_rev_id_t *el_rev,
+ const char *op,
+ apr_pool_t *scratch_pool)
+{
+ if (is_branch_root_element(el_rev->branch, el_rev->eid))
+ {
+ svn_branch__state_t *outer_branch;
+ int outer_eid;
+
+ svn_branch__get_outer_branch_and_eid(&outer_branch, &outer_eid,
+ el_rev->branch, scratch_pool);
+
+ if (! outer_branch)
+ return svn_error_createf(SVN_BRANCH__ERR, NULL, "%s: %s", op,
+ _("svnmover cannot delete or move a "
+ "top-level branch"));
+
+ el_rev->eid = outer_eid;
+ el_rev->branch = outer_branch;
+ }
+
+ return SVN_NO_ERROR;
+}
+
+static svn_error_t *
+execute(svnmover_wc_t *wc,
+ const apr_array_header_t *actions,
+ const char *anchor_url,
+ apr_hash_t *revprops,
+ svn_client_ctx_t *ctx,
+ apr_pool_t *pool)
+{
+ const char *base_relpath;
+ apr_pool_t *iterpool = svn_pool_create(pool);
+ int i;
+
+ base_relpath = svn_uri_skip_ancestor(wc->repos_root_url, anchor_url, pool);
+
+ for (i = 0; i < actions->nelts; ++i)
+ {
+ action_t *action = APR_ARRAY_IDX(actions, i, action_t *);
+ int j;
+ arg_t *arg[3] = { NULL, NULL, NULL };
+
+ svn_pool_clear(iterpool);
+
+ /* Before translating paths to/from elements, need a sequence point */
+ SVN_ERR(svn_branch__txn_sequence_point(wc->edit_txn, iterpool));
+
+ /* Convert each ACTION[j].{relpath, rev_spec} to
+ (EL_REV[j], PARENT_EL_REV[j], PATH_NAME[j], REVNUM[j]),
+ except for the local-path argument of a 'put' command. */
+ for (j = 0; j < 3; j++)
+ {
+ if (action->relpath[j]
+ && ! (action->action == ACTION_PUT_FILE && j == 0))
+ {
+ const char *rrpath, *parent_rrpath;
+
+ arg[j] = apr_palloc(iterpool, sizeof(*arg[j]));
+
+ rrpath = svn_relpath_join(base_relpath, action->relpath[j], iterpool);
+ parent_rrpath = svn_relpath_dirname(rrpath, iterpool);
+
+ arg[j]->path_name = svn_relpath_basename(rrpath, NULL);
+ SVN_ERR(find_el_rev_by_rrpath_rev(&arg[j]->el_rev, wc,
+ &action->rev_spec[j],
+ action->branch_id[j],
+ rrpath,
+ iterpool, iterpool));
+ SVN_ERR(find_el_rev_by_rrpath_rev(&arg[j]->parent_el_rev, wc,
+ &action->rev_spec[j],
+ action->branch_id[j],
+ parent_rrpath,
+ iterpool, iterpool));
+ }
+ }
+
+ switch (action->action)
+ {
+ case ACTION_INFO_WC:
+ {
+ svn_boolean_t is_modified;
+ svn_revnum_t base_rev_min, base_rev_max;
+
+ SVN_ERR(txn_is_changed(wc->working->branch->txn, &is_modified,
+ iterpool));
+ SVN_ERR(svnmover_wc_get_base_revs(wc, &base_rev_min, &base_rev_max,
+ iterpool));
+
+ svnmover_notify("Repository Root: %s", wc->repos_root_url);
+ if (base_rev_min == base_rev_max)
+ svnmover_notify("Base Revision: %ld", base_rev_min);
+ else
+ svnmover_notify("Base Revisions: %ld to %ld",
+ base_rev_min, base_rev_max);
+ svnmover_notify("Base Branch: %s", wc->base->branch->bid);
+ svnmover_notify("Working Branch: %s", wc->working->branch->bid);
+ SVN_ERR(show_branch_history(wc->working->branch, iterpool));
+ svnmover_notify("Modified: %s", is_modified ? "yes" : "no");
+ }
+ break;
+
+ case ACTION_INFO:
+ VERIFY_EID_EXISTS("info", 0);
+ {
+ /* If it's a nested branch root, show info for the outer element
+ first, and then for the inner element. */
+ if (is_branch_root_element(arg[0]->el_rev->branch,
+ arg[0]->el_rev->eid))
+ {
+ svn_branch__state_t *outer_branch;
+ int outer_eid;
+
+ svn_branch__get_outer_branch_and_eid(&outer_branch, &outer_eid,
+ arg[0]->el_rev->branch,
+ iterpool);
+ if (outer_branch)
+ {
+ svn_branch__el_rev_id_t *outer_e
+ = svn_branch__el_rev_id_create(outer_branch, outer_eid,
+ arg[0]->el_rev->rev,
+ iterpool);
+ SVN_ERR(do_info(wc, outer_e, iterpool));
+ }
+ }
+ SVN_ERR(do_info(wc, arg[0]->el_rev, iterpool));
+ }
+ break;
+
+ case ACTION_LIST_CONFLICTS:
+ {
+ if (svnmover_any_conflicts(wc->conflicts))
+ {
+ SVN_ERR(svnmover_display_conflicts(wc->conflicts, iterpool));
+ }
+ }
+ break;
+
+ case ACTION_RESOLVED_CONFLICT:
+ {
+ if (svnmover_any_conflicts(wc->conflicts))
+ {
+ SVN_ERR(svnmover_conflict_resolved(wc->conflicts,
+ action->relpath[0],
+ iterpool));
+ }
+ else
+ {
+ return svn_error_create(SVN_BRANCH__ERR, NULL,
+ _("No conflicts are currently flagged"));
+ }
+ }
+ break;
+
+ case ACTION_DIFF:
+ VERIFY_EID_EXISTS("diff", 0);
+ VERIFY_EID_EXISTS("diff", 1);
+ {
+ SVN_ERR(branch_diff_r(arg[0]->el_rev /*from*/,
+ arg[1]->el_rev /*to*/,
+ show_subtree_diff, "",
+ iterpool));
+ }
+ break;
+
+ case ACTION_STATUS:
+ {
+ svn_branch__el_rev_id_t *from, *to;
+
+ from = svn_branch__el_rev_id_create(wc->base->branch,
+ svn_branch__root_eid(wc->base->branch),
+ SVN_INVALID_REVNUM, iterpool);
+ to = svn_branch__el_rev_id_create(wc->working->branch,
+ svn_branch__root_eid(wc->working->branch),
+ SVN_INVALID_REVNUM, iterpool);
+ SVN_ERR(branch_diff_r(from, to,
+ show_subtree_diff, "",
+ iterpool));
+ }
+ break;
+
+ case ACTION_LOG:
+ VERIFY_EID_EXISTS("log", 0);
+ VERIFY_EID_EXISTS("log", 1);
+ {
+ SVN_ERR(do_log(arg[0]->el_rev /*from*/,
+ arg[1]->el_rev /*to*/,
+ iterpool));
+ }
+ break;
+
+ case ACTION_LIST_BRANCHES:
+ {
+ VERIFY_EID_EXISTS("branches", 0);
+ if (the_ui_mode == UI_MODE_PATHS)
+ {
+ svnmover_notify_v("branches rooted at same element as '%s':",
+ action->relpath[0]);
+ }
+ else
+ {
+ svnmover_notify_v("branches rooted at e%d:",
+ arg[0]->el_rev->eid);
+ }
+ SVN_ERR(list_branches(
+ arg[0]->el_rev->branch->txn,
+ arg[0]->el_rev->eid,
+ FALSE, iterpool));
+ }
+ break;
+
+ case ACTION_LIST_BRANCHES_R:
+ {
+ if (the_ui_mode == UI_MODE_SERIAL)
+ {
+ svn_stream_t *stream;
+ SVN_ERR(svn_stream_for_stdout(&stream, iterpool));
+ SVN_ERR(svn_branch__txn_serialize(wc->working->branch->txn,
+ stream,
+ iterpool));
+ }
+ else
+ {
+ /* Note: BASE_REVISION is always a real revision number, here */
+ SVN_ERR(list_all_branches(wc->working->branch->txn, TRUE,
+ iterpool));
+ }
+ }
+ break;
+
+ case ACTION_LS:
+ {
+ VERIFY_EID_EXISTS("ls", 0);
+ if (the_ui_mode == UI_MODE_PATHS)
+ {
+ SVN_ERR(list_branch_elements(arg[0]->el_rev->branch, iterpool));
+ }
+ else if (the_ui_mode == UI_MODE_EIDS)
+ {
+ SVN_ERR(list_branch_elements_by_eid(arg[0]->el_rev->branch,
+ iterpool));
+ }
+ else
+ {
+ svn_stream_t *stream;
+ SVN_ERR(svn_stream_for_stdout(&stream, iterpool));
+ SVN_ERR(svn_branch__state_serialize(stream,
+ arg[0]->el_rev->branch,
+ iterpool));
+ }
+ }
+ break;
+
+ case ACTION_TBRANCH:
+ VERIFY_EID_EXISTS("tbranch", 0);
+ {
+ const char *from_branch_id = svn_branch__get_id(arg[0]->el_rev->branch,
+ iterpool);
+ svn_branch__rev_bid_eid_t *from
+ = svn_branch__rev_bid_eid_create(arg[0]->el_rev->rev, from_branch_id,
+ arg[0]->el_rev->eid, iterpool);
+ svn_branch__state_t *new_branch;
+
+ SVN_ERR(do_topbranch(&new_branch, wc->edit_txn,
+ from,
+ iterpool, iterpool));
+ /* Switch the WC working state to this new branch */
+ wc->working->branch = new_branch;
+ }
+ break;
+
+ case ACTION_BRANCH:
+ VERIFY_EID_EXISTS("branch", 0);
+ VERIFY_REV_UNSPECIFIED("branch", 1);
+ VERIFY_EID_NONEXISTENT("branch", 1);
+ VERIFY_PARENT_EID_EXISTS("branch", 1);
+ {
+ const char *from_branch_id = svn_branch__get_id(arg[0]->el_rev->branch,
+ iterpool);
+ svn_branch__rev_bid_eid_t *from
+ = svn_branch__rev_bid_eid_create(arg[0]->el_rev->rev, from_branch_id,
+ arg[0]->el_rev->eid, iterpool);
+ svn_branch__state_t *new_branch;
+
+ SVN_ERR(do_branch(&new_branch, wc->edit_txn,
+ from,
+ arg[1]->el_rev->branch, arg[1]->parent_el_rev->eid,
+ arg[1]->path_name,
+ iterpool, iterpool));
+ }
+ break;
+
+ case ACTION_BRANCH_INTO:
+ VERIFY_EID_EXISTS("branch-into", 0);
+ VERIFY_REV_UNSPECIFIED("branch-into", 1);
+ VERIFY_EID_NONEXISTENT("branch-into", 1);
+ VERIFY_PARENT_EID_EXISTS("branch-into", 1);
+ {
+ SVN_ERR(do_branch_into(arg[0]->el_rev->branch, arg[0]->el_rev->eid,
+ arg[1]->el_rev->branch,
+ arg[1]->parent_el_rev->eid, arg[1]->path_name,
+ iterpool));
+ }
+ break;
+
+ case ACTION_MKBRANCH:
+ VERIFY_REV_UNSPECIFIED("mkbranch", 0);
+ VERIFY_EID_NONEXISTENT("mkbranch", 0);
+ VERIFY_PARENT_EID_EXISTS("mkbranch", 0);
+ {
+ apr_hash_t *props = apr_hash_make(iterpool);
+ svn_element__payload_t *payload
+ = svn_element__payload_create_dir(props, iterpool);
+
+ SVN_ERR(do_mkbranch(NULL, wc->edit_txn,
+ arg[0]->parent_el_rev->branch,
+ arg[0]->parent_el_rev->eid, arg[0]->path_name,
+ payload, iterpool));
+ }
+ break;
+
+ case ACTION_MERGE3:
+ {
+ VERIFY_EID_EXISTS("merge", 0);
+ VERIFY_EID_EXISTS("merge", 1);
+ VERIFY_REV_UNSPECIFIED("merge", 1);
+ VERIFY_EID_EXISTS("merge", 2);
+
+ SVN_ERR(do_merge(wc,
+ arg[0]->el_rev /*from*/,
+ arg[1]->el_rev /*to*/,
+ arg[2]->el_rev /*yca*/,
+ iterpool));
+ }
+ break;
+
+ case ACTION_AUTO_MERGE:
+ {
+ VERIFY_EID_EXISTS("merge", 0);
+ VERIFY_EID_EXISTS("merge", 1);
+ VERIFY_REV_UNSPECIFIED("merge", 1);
+
+ SVN_ERR(do_auto_merge(wc,
+ arg[0]->el_rev /*from*/,
+ arg[1]->el_rev /*to*/,
+ iterpool));
+ }
+ break;
+
+ case ACTION_MV:
+ SVN_ERR(point_to_outer_element_instead(arg[0]->el_rev, "mv",
+ iterpool));
+
+ VERIFY_REV_UNSPECIFIED("mv", 0);
+ VERIFY_EID_EXISTS("mv", 0);
+ VERIFY_REV_UNSPECIFIED("mv", 1);
+ VERIFY_EID_NONEXISTENT("mv", 1);
+ VERIFY_PARENT_EID_EXISTS("mv", 1);
+ VERIFY_NOT_CHILD_OF_SELF("mv", 0, 1, iterpool);
+
+ /* Simple move/rename within same branch, if possible */
+ if (BRANCH_IS_SAME_BRANCH(arg[1]->parent_el_rev->branch,
+ arg[0]->el_rev->branch,
+ iterpool))
+ {
+ SVN_ERR(do_move(arg[0]->el_rev,
+ arg[1]->parent_el_rev, arg[1]->path_name,
+ iterpool));
+ }
+ else
+ {
+ SVN_ERR(do_interactive_cross_branch_move(wc->edit_txn,
+ arg[0]->el_rev,
+ arg[1]->parent_el_rev,
+ arg[1]->path_name,
+ iterpool));
+ }
+ break;
+
+ case ACTION_CP:
+ VERIFY_REV_SPECIFIED("cp", 0);
+ /* (Or do we want to support copying from "this txn" too?) */
+ VERIFY_EID_EXISTS("cp", 0);
+ VERIFY_REV_UNSPECIFIED("cp", 1);
+ VERIFY_EID_NONEXISTENT("cp", 1);
+ VERIFY_PARENT_EID_EXISTS("cp", 1);
+ SVN_ERR(do_copy(arg[0]->el_rev,
+ arg[1]->parent_el_rev->branch,
+ arg[1]->parent_el_rev->eid, arg[1]->path_name,
+ iterpool));
+ break;
+
+ case ACTION_RM:
+ SVN_ERR(point_to_outer_element_instead(arg[0]->el_rev, "rm",
+ iterpool));
+
+ VERIFY_REV_UNSPECIFIED("rm", 0);
+ VERIFY_EID_EXISTS("rm", 0);
+ SVN_ERR(do_delete(arg[0]->el_rev->branch, arg[0]->el_rev->eid,
+ iterpool));
+ break;
+
+ case ACTION_CP_RM:
+ SVN_ERR(point_to_outer_element_instead(arg[0]->el_rev,
+ "copy-and-delete", iterpool));
+
+ VERIFY_REV_UNSPECIFIED("copy-and-delete", 0);
+ VERIFY_EID_EXISTS("copy-and-delete", 0);
+ VERIFY_REV_UNSPECIFIED("copy-and-delete", 1);
+ VERIFY_EID_NONEXISTENT("copy-and-delete", 1);
+ VERIFY_PARENT_EID_EXISTS("copy-and-delete", 1);
+ VERIFY_NOT_CHILD_OF_SELF("copy-and-delete", 0, 1, iterpool);
+
+ SVN_ERR(do_copy_and_delete(arg[0]->el_rev,
+ arg[1]->parent_el_rev->branch,
+ arg[1]->parent_el_rev->eid,
+ arg[1]->path_name,
+ iterpool));
+ break;
+
+ case ACTION_BR_RM:
+ SVN_ERR(point_to_outer_element_instead(arg[0]->el_rev,
+ "branch-and-delete",
+ iterpool));
+
+ VERIFY_REV_UNSPECIFIED("branch-and-delete", 0);
+ VERIFY_EID_EXISTS("branch-and-delete", 0);
+ VERIFY_REV_UNSPECIFIED("branch-and-delete", 1);
+ VERIFY_EID_NONEXISTENT("branch-and-delete", 1);
+ VERIFY_PARENT_EID_EXISTS("branch-and-delete", 1);
+ VERIFY_NOT_CHILD_OF_SELF("branch-and-delete", 0, 1, iterpool);
+
+ SVN_ERR(do_branch_and_delete(wc->edit_txn,
+ arg[0]->el_rev,
+ arg[1]->parent_el_rev->branch,
+ arg[1]->parent_el_rev->eid,
+ arg[1]->path_name,
+ iterpool));
+ break;
+
+ case ACTION_BR_INTO_RM:
+ SVN_ERR(point_to_outer_element_instead(arg[0]->el_rev,
+ "branch-into-and-delete",
+ iterpool));
+
+ VERIFY_REV_UNSPECIFIED("branch-into-and-delete", 0);
+ VERIFY_EID_EXISTS("branch-into-and-delete", 0);
+ VERIFY_REV_UNSPECIFIED("branch-into-and-delete", 1);
+ VERIFY_EID_NONEXISTENT("branch-into-and-delete", 1);
+ VERIFY_PARENT_EID_EXISTS("branch-into-and-delete", 1);
+ VERIFY_NOT_CHILD_OF_SELF("branch-into-and-delete", 0, 1, iterpool);
+
+ SVN_ERR(do_branch_into_and_delete(arg[0]->el_rev,
+ arg[1]->parent_el_rev->branch,
+ arg[1]->parent_el_rev->eid,
+ arg[1]->path_name,
+ iterpool));
+ break;
+
+ case ACTION_MKDIR:
+ VERIFY_REV_UNSPECIFIED("mkdir", 0);
+ VERIFY_EID_NONEXISTENT("mkdir", 0);
+ VERIFY_PARENT_EID_EXISTS("mkdir", 0);
+ SVN_ERR(do_mkdir(wc->edit_txn,
+ arg[0]->parent_el_rev->branch,
+ arg[0]->parent_el_rev->eid, arg[0]->path_name,
+ iterpool));
+ break;
+
+ case ACTION_PUT_FILE:
+ VERIFY_REV_UNSPECIFIED("put", 1);
+ VERIFY_PARENT_EID_EXISTS("put", 1);
+ SVN_ERR(do_put_file(wc->edit_txn,
+ action->relpath[0],
+ arg[1]->el_rev,
+ arg[1]->parent_el_rev,
+ arg[1]->path_name,
+ iterpool));
+ break;
+
+ case ACTION_CAT:
+ VERIFY_EID_EXISTS("rm", 0);
+ SVN_ERR(do_cat(arg[0]->el_rev,
+ iterpool));
+ break;
+
+ case ACTION_COMMIT:
+ {
+ svn_revnum_t new_rev;
+
+ SVN_ERR(do_commit(&new_rev, wc, revprops, iterpool));
+ if (! SVN_IS_VALID_REVNUM(new_rev))
+ {
+ svnmover_notify_v("There are no changes to commit.");
+ }
+ }
+ break;
+
+ case ACTION_UPDATE:
+ /* ### If current WC branch doesn't exist in target rev, should
+ 'update' follow to a different branch? By following merge graph?
+ Presently it would try to update to a state of nonexistence. */
+ /* path (or eid) is currently required for syntax, but ignored */
+ VERIFY_EID_EXISTS("update", 0);
+ /* We require a rev to be specified because an unspecified rev
+ currently always means 'working version', whereas we would
+ want it to mean 'head' for this subcommand. */
+ VERIFY_REV_SPECIFIED("update", 0);
+ {
+ SVN_ERR(do_switch(wc, arg[0]->el_rev->rev, wc->base->branch,
+ iterpool));
+ }
+ break;
+
+ case ACTION_SWITCH:
+ VERIFY_EID_EXISTS("switch", 0);
+ {
+ SVN_ERR(do_switch(wc, arg[0]->el_rev->rev, arg[0]->el_rev->branch,
+ iterpool));
+ }
+ break;
+
+ case ACTION_REVERT:
+ {
+ SVN_ERR(do_revert(wc, iterpool));
+ }
+ break;
+
+ case ACTION_MIGRATE:
+ /* path (or eid) is currently required for syntax, but ignored */
+ VERIFY_EID_EXISTS("migrate", 0);
+ VERIFY_REV_SPECIFIED("migrate", 0);
+ {
+ SVN_ERR(do_migrate(wc,
+ arg[0]->el_rev->rev, arg[0]->el_rev->rev,
+ iterpool));
+ }
+ break;
+
+ default:
+ SVN_ERR_MALFUNCTION();
+ }
+
+ if (action->action != ACTION_COMMIT)
+ {
+ wc->list_of_commands
+ = apr_psprintf(pool, "%s%s\n",
+ wc->list_of_commands ? wc->list_of_commands : "",
+ svn_cstring_join2(action->action_args, " ",
+ TRUE, pool));
+ }
+ }
+ svn_pool_destroy(iterpool);
+ return SVN_NO_ERROR;
+}
+
+/* Perform the typical suite of manipulations for user-provided URLs
+ on URL, returning the result (allocated from POOL): IRI-to-URI
+ conversion, auto-escaping, and canonicalization. */
+static const char *
+sanitize_url(const char *url,
+ apr_pool_t *pool)
+{
+ url = svn_path_uri_from_iri(url, pool);
+ url = svn_path_uri_autoescape(url, pool);
+ return svn_uri_canonicalize(url, pool);
+}
+
+static const char *
+help_for_subcommand(const action_defn_t *action, apr_pool_t *pool)
+{
+ const char *cmd = apr_psprintf(pool, "%s %s",
+ action->name, action->args_help);
+
+ return apr_psprintf(pool, " %-22s : %s\n", cmd, action->help);
+}
+
+/* Print a usage message on STREAM, listing only the actions. */
+static void
+usage_actions_only(FILE *stream, apr_pool_t *pool)
+{
+ int i;
+
+ for (i = 0; i < sizeof (action_defn) / sizeof (action_defn[0]); i++)
+ svn_error_clear(svn_cmdline_fputs(
+ help_for_subcommand(&action_defn[i], pool),
+ stream, pool));
+}
+
/* Print a usage message on STREAM: a general header, then the list of
 * actions (via usage_actions_only), then the valid options.  Output
 * errors are deliberately ignored (best-effort help text). */
static void
usage(FILE *stream, apr_pool_t *pool)
{
  svn_error_clear(svn_cmdline_fputs(
    _("usage: svnmover -U REPO_URL [ACTION...]\n"
      "A client for experimenting with move tracking.\n"
      "\n"
      " Commit a batch of ACTIONs to a Subversion repository, as a single\n"
      " new revision. With no ACTIONs specified, read actions interactively\n"
      " from standard input, until EOF or ^C, and then commit the result.\n"
      "\n"
      " Action arguments are of the form\n"
      " [^B<branch-id>/]<path>[@<revnum>]\n"
      " where\n"
      " <branch-id> defaults to the working branch or, when <revnum> is\n"
      " given, to the base branch\n"
      " <path> is a path relative to the branch\n"
      " <revnum> is the revision number, when making a historic reference\n"
      "\n"
      " Move tracking metadata is stored in the repository, in on-disk files\n"
      " for RA-local or in revprops otherwise.\n"
      "\n"
      "Actions:\n"),
    stream, pool));
  usage_actions_only(stream, pool);
  svn_error_clear(svn_cmdline_fputs(
    _("\n"
      "Valid options:\n"
      " --ui={eids|e|paths|p} : display information as elements or as paths\n"
      " --colo[u]r={always|never|auto}\n"
      " : use coloured output; 'auto' means when standard\n"
      " output goes to a terminal; default: never\n"
      " -h, -? [--help] : display this text\n"
      " -v [--verbose] : display debugging messages\n"
      " -q [--quiet] : suppress notifications\n"
      " -m [--message] ARG : use ARG as a log message\n"
      " -F [--file] ARG : read log message from file ARG\n"
      " -u [--username] ARG : commit the changes as username ARG\n"
      " -p [--password] ARG : use ARG as the password\n"
      " -U [--root-url] ARG : interpret all action URLs relative to ARG\n"
      " -r [--revision] ARG : use revision ARG as baseline for changes\n"
      " -B [--branch-id] ARG : work on the branch identified by ARG\n"
      " --with-revprop ARG : set revision property in the following format:\n"
      " NAME[=VALUE]\n"
      " --non-interactive : do no interactive prompting (default is to\n"
      " prompt only if standard input is a terminal)\n"
      " --force-interactive : do interactive prompting even if standard\n"
      " input is not a terminal\n"
      " --trust-server-cert : accept SSL server certificates from unknown\n"
      " certificate authorities without prompting (but\n"
      " only with '--non-interactive')\n"
      " -X [--extra-args] ARG : append arguments from file ARG (one per line;\n"
      " use \"-\" to read from standard input)\n"
      " --config-dir ARG : use ARG to override the config directory\n"
      " --config-option ARG : use ARG to override a configuration option\n"
      " --no-auth-cache : do not cache authentication tokens\n"
      " --version : print version information\n"),
    stream, pool));
}
+
+static svn_error_t *
+insufficient(int i, apr_pool_t *pool)
+{
+ return svn_error_createf(SVN_ERR_INCORRECT_PARAMS, NULL,
+ "insufficient arguments:\n"
+ "%s",
+ help_for_subcommand(&action_defn[i], pool));
+}
+
+static svn_error_t *
+display_version(apr_getopt_t *os, svn_boolean_t _quiet, apr_pool_t *pool)
+{
+ const char *ra_desc_start
+ = "The following repository access (RA) modules are available:\n\n";
+ svn_stringbuf_t *version_footer;
+
+ version_footer = svn_stringbuf_create(ra_desc_start, pool);
+ SVN_ERR(svn_ra_print_modules(version_footer, pool));
+
+ SVN_ERR(svn_opt_print_help4(NULL, "svnmover", TRUE, _quiet, FALSE,
+ version_footer->data,
+ NULL, NULL, NULL, NULL, NULL, pool));
+
+ return SVN_NO_ERROR;
+}
+
+/* Return an error about the mutual exclusivity of the -m, -F, and
+ --with-revprop=svn:log command-line options. */
+static svn_error_t *
+mutually_exclusive_logs_error(void)
+{
+ return svn_error_create(SVN_ERR_CL_ARG_PARSING_ERROR, NULL,
+ _("--message (-m), --file (-F), and "
+ "--with-revprop=svn:log are mutually "
+ "exclusive"));
+}
+
+/* Obtain the log message from multiple sources, producing an error
+ if there are multiple sources. Store the result in *FINAL_MESSAGE. */
+static svn_error_t *
+get_log_message(const char **final_message,
+ const char *message,
+ apr_hash_t *revprops,
+ svn_stringbuf_t *filedata,
+ apr_pool_t *result_pool,
+ apr_pool_t *scratch_pool)
+{
+ svn_string_t *msg;
+
+ *final_message = NULL;
+ /* If we already have a log message in the revprop hash, then just
+ make sure the user didn't try to also use -m or -F. Otherwise,
+ we need to consult -m or -F to find a log message, if any. */
+ msg = svn_hash_gets(revprops, SVN_PROP_REVISION_LOG);
+ if (msg)
+ {
+ if (filedata || message)
+ return mutually_exclusive_logs_error();
+
+ /* Remove it from the revprops; it will be re-added later */
+ svn_hash_sets(revprops, SVN_PROP_REVISION_LOG, NULL);
+ }
+ else if (filedata)
+ {
+ if (message)
+ return mutually_exclusive_logs_error();
+
+ msg = svn_string_create(filedata->data, scratch_pool);
+ }
+ else if (message)
+ {
+ msg = svn_string_create(message, scratch_pool);
+ }
+
+ if (msg)
+ {
+ SVN_ERR_W(svn_subst_translate_string2(&msg, NULL, NULL,
+ msg, NULL, FALSE,
+ result_pool, scratch_pool),
+ _("Error normalizing log message to internal format"));
+
+ *final_message = msg->data;
+ }
+
+ return SVN_NO_ERROR;
+}
+
/* Interactive-mode keywords that are not subcommands in ACTION_DEFN but
 * are still offered for tab-completion (see linenoise_completion). */
static const char *const special_commands[] =
{
  "help",
  "--verbose",
  "--ui=paths", "--ui=eids", "--ui=serial",
};
+
/* Parse the action arguments into action structures.
 *
 * ACTION_ARGS is the flat list of command words; *ACTIONS receives an
 * array of action_t*.  Special words ("help", "--ui=...", "--verbose")
 * are handled immediately and produce no action_t.  Each subcommand
 * consumes its declared number of following path arguments (plus, for
 * 'cp', one extra revision argument), advancing I past them.
 */
static svn_error_t *
parse_actions(apr_array_header_t **actions,
              apr_array_header_t *action_args,
              apr_pool_t *pool)
{
  int i;

  *actions = apr_array_make(pool, 1, sizeof(action_t *));

  for (i = 0; i < action_args->nelts; ++i)
    {
      int j, k, num_url_args;
      const char *action_string = APR_ARRAY_IDX(action_args, i, const char *);
      action_t *action = apr_pcalloc(pool, sizeof(*action));
      const char *cp_from_rev = NULL;

      /* First, parse the action. Handle some special actions immediately;
         handle normal subcommands by looking them up in the table. */
      if (! strcmp(action_string, "?") || ! strcmp(action_string, "h")
          || ! strcmp(action_string, "help"))
        {
          /* 'help' aborts parsing entirely; remaining words are ignored. */
          usage_actions_only(stdout, pool);
          return SVN_NO_ERROR;
        }
      if (! strncmp(action_string, "--ui=", 5))
        {
          SVN_ERR(svn_token__from_word_err(&the_ui_mode, ui_mode_map,
                                           action_string + 5));
          continue;
        }
      if (! strcmp(action_string, "--verbose")
          || ! strcmp(action_string, "-v"))
        {
          /* '--verbose' toggles (rather than sets) verbosity. */
          quiet = !quiet;
          svnmover_notify("verbose mode %s", quiet ? "off" : "on");
          continue;
        }
      for (j = 0; j < sizeof(action_defn) / sizeof(action_defn[0]); j++)
        {
          if (strcmp(action_string, action_defn[j].name) == 0)
            {
              action->action = action_defn[j].code;
              num_url_args = action_defn[j].num_args;
              break;
            }
        }
      /* J having run off the end of the table means no match. */
      if (j == sizeof(action_defn) / sizeof(action_defn[0]))
        return svn_error_createf(SVN_ERR_INCORRECT_PARAMS, NULL,
                                 "'%s' is not an action; try 'help'.",
                                 action_string);

      action->action_args = apr_array_make(pool, 0, sizeof(const char *));
      APR_ARRAY_PUSH(action->action_args, const char *) = action_string;

      if (action->action == ACTION_CP)
        {
          /* next argument is the copy source revision */
          if (++i == action_args->nelts)
            return svn_error_trace(insufficient(j, pool));
          cp_from_rev = APR_ARRAY_IDX(action_args, i, const char *);
          APR_ARRAY_PUSH(action->action_args, const char *) = cp_from_rev;
        }

      /* Parse the required number of URLs. */
      for (k = 0; k < num_url_args; ++k)
        {
          const char *path;

          if (++i == action_args->nelts)
            return svn_error_trace(insufficient(j, pool));
          path = APR_ARRAY_IDX(action_args, i, const char *);
          APR_ARRAY_PUSH(action->action_args, const char *) = path;

          if (cp_from_rev && k == 0)
            {
              /* Fold the separately-given source revision into "path@rev"
                 form so svn_opt_parse_path can pick it up. */
              path = apr_psprintf(pool, "%s@%s", path, cp_from_rev);
            }

          SVN_ERR(svn_opt_parse_path(&action->rev_spec[k], &path, path, pool));

          /* If there's an ANCHOR_URL, we expect URL to be a path
             relative to ANCHOR_URL (and we build a full url from the
             combination of the two). Otherwise, it should be a full
             url. */
          if (svn_path_is_url(path))
            {
              return svn_error_createf(SVN_ERR_INCORRECT_PARAMS, NULL,
                                       "Argument '%s' is a URL; use "
                                       "--root-url (-U) instead", path);
            }
          /* Parse "^B<branch-id>/path" syntax. */
          if (strncmp("^B", path, 2) == 0)
            {
              const char *slash = strchr(path, '/');

              action->branch_id[k]
                = slash ? apr_pstrndup(pool, path + 1, slash - (path + 1))
                        : path + 1;
              path = slash ? slash + 1 : "";
            }
          /* These args must be relpaths, except for the 'local file' arg
             of a 'put' command. */
          if (! svn_relpath_is_canonical(path)
              && ! (action->action == ACTION_PUT_FILE && k == 0))
            {
              return svn_error_createf(SVN_ERR_INCORRECT_PARAMS, NULL,
                                       "Argument '%s' is not a relative path "
                                       "or a URL", path);
            }
          action->relpath[k] = path;
        }

      APR_ARRAY_PUSH(*actions, action_t *) = action;
    }

  return SVN_NO_ERROR;
}
+
#ifdef HAVE_LINENOISE
/* If CANDIDATE starts with (and is strictly longer than) the partial
 * input BUF, offer "CANDIDATE " (with a trailing space appended) as a
 * completion in LC.
 *
 * NOTE(review): a static buffer is reused across calls, as in the
 * original code; this presumes linenoiseAddCompletion copies the string
 * it is given — confirm against the linenoise API.
 */
static void
add_completion_if_match(const char *buf,
                        const char *candidate,
                        linenoiseCompletions *lc)
{
  if (strncmp(buf, candidate, strlen(buf)) == 0
      && strlen(candidate) > strlen(buf))
    {
      static char completion[100];

      apr_cpystrn(completion, candidate, sizeof(completion) - 1);
      strcat(completion, " ");
      linenoiseAddCompletion(lc, completion);
    }
}

/* A command-line completion callback for the 'Line Noise' interactive
 * prompting.
 *
 * This is called when the user presses the Tab key. It calculates the
 * possible completions for the partial line BUF.
 *
 * ### So far, this only works on a single command keyword at the start
 * of the line.
 */
static void
linenoise_completion(const char *buf, linenoiseCompletions *lc)
{
  int i;

  /* Suggest each special command, then each subcommand, that matches
     (and is longer than) what the user has already typed.  The two loops
     previously duplicated the matching logic inline; it now lives in
     add_completion_if_match(). */
  for (i = 0; i < sizeof(special_commands) / sizeof(special_commands[0]); i++)
    add_completion_if_match(buf, special_commands[i], lc);

  for (i = 0; i < sizeof(action_defn) / sizeof(action_defn[0]); i++)
    add_completion_if_match(buf, action_defn[i].name, lc);
}
#endif
+
+/* Display a prompt, read a line of input and split it into words.
+ *
+ * Set *WORDS to null if input is cancelled (by ctrl-C for example).
+ */
+static svn_error_t *
+read_words(apr_array_header_t **words,
+ const char *prompt,
+ apr_pool_t *result_pool)
+{
+ svn_error_t *err;
+ const char *input;
+
+ settext(TEXT_FG_YELLOW);
+ err = svnmover_prompt_user(&input, prompt, result_pool);
+ settext(TEXT_RESET);
+ if (err && (err->apr_err == SVN_ERR_CANCELLED || err->apr_err == APR_EOF))
+ {
+ *words = NULL;
+ svn_error_clear(err);
+ return SVN_NO_ERROR;
+ }
+ SVN_ERR(err);
+ *words = svn_cstring_split(input, " ", TRUE /*chop_whitespace*/, result_pool);
+
+ return SVN_NO_ERROR;
+}
+
+/*
+ * On success, leave *EXIT_CODE untouched and return SVN_NO_ERROR. On error,
+ * either return an error to be displayed, or set *EXIT_CODE to non-zero and
+ * return SVN_NO_ERROR.
+ */
+static svn_error_t *
+sub_main(int *exit_code, int argc, const char *argv[], apr_pool_t *pool)
+{
+ apr_array_header_t *actions;
+ svn_error_t *err = SVN_NO_ERROR;
+ apr_getopt_t *opts;
+ enum {
+ config_dir_opt = SVN_OPT_FIRST_LONGOPT_ID,
+ config_inline_opt,
+ no_auth_cache_opt,
+ version_opt,
+ with_revprop_opt,
+ non_interactive_opt,
+ force_interactive_opt,
+ trust_server_cert_opt,
+ trust_server_cert_failures_opt,
+ ui_opt,
+ colour_opt,
+ auth_password_from_stdin_opt
+ };
+ static const apr_getopt_option_t options[] = {
+ {"verbose", 'v', 0, ""},
+ {"quiet", 'q', 0, ""},
+ {"message", 'm', 1, ""},
+ {"file", 'F', 1, ""},
+ {"username", 'u', 1, ""},
+ {"password", 'p', 1, ""},
+ {"password-from-stdin", auth_password_from_stdin_opt, 1, ""},
+ {"root-url", 'U', 1, ""},
+ {"revision", 'r', 1, ""},
+ {"branch-id", 'B', 1, ""},
+ {"with-revprop", with_revprop_opt, 1, ""},
+ {"extra-args", 'X', 1, ""},
+ {"help", 'h', 0, ""},
+ {NULL, '?', 0, ""},
+ {"non-interactive", non_interactive_opt, 0, ""},
+ {"force-interactive", force_interactive_opt, 0, ""},
+ {"trust-server-cert", trust_server_cert_opt, 0, ""},
+ {"trust-server-cert-failures", trust_server_cert_failures_opt, 1, ""},
+ {"config-dir", config_dir_opt, 1, ""},
+ {"config-option", config_inline_opt, 1, ""},
+ {"no-auth-cache", no_auth_cache_opt, 0, ""},
+ {"version", version_opt, 0, ""},
+ {"ui", ui_opt, 1, ""},
+ {"colour", colour_opt, 1, ""},
+ {"color", colour_opt, 1, ""},
+ {NULL, 0, 0, NULL}
+ };
+ const char *message = NULL;
+ svn_stringbuf_t *filedata = NULL;
+ const char *username = NULL, *password = NULL;
+ const char *anchor_url = NULL, *extra_args_file = NULL;
+ const char *config_dir = NULL;
+ apr_array_header_t *config_options;
+ svn_boolean_t show_version = FALSE;
+ svn_boolean_t non_interactive = FALSE;
+ svn_boolean_t force_interactive = FALSE;
+ svn_boolean_t interactive_actions;
+ svn_boolean_t trust_unknown_ca = FALSE;
+ svn_boolean_t trust_cn_mismatch = FALSE;
+ svn_boolean_t trust_expired = FALSE;
+ svn_boolean_t trust_not_yet_valid = FALSE;
+ svn_boolean_t trust_other_failure = FALSE;
+ svn_boolean_t no_auth_cache = FALSE;
+ svn_revnum_t base_revision = SVN_INVALID_REVNUM;
+ const char *branch_id = "B0"; /* default branch */
+ apr_array_header_t *action_args;
+ apr_hash_t *revprops = apr_hash_make(pool);
+ apr_hash_t *cfg_hash;
+ svn_config_t *cfg_config;
+ svn_client_ctx_t *ctx;
+ const char *log_msg;
+ svn_tristate_t coloured_output = svn_tristate_false;
+ svnmover_wc_t *wc;
+ svn_boolean_t read_pass_from_stdin = FALSE;
+
+ /* Check library versions */
+ SVN_ERR(check_lib_versions());
+
+ config_options = apr_array_make(pool, 0,
+ sizeof(svn_cmdline__config_argument_t*));
+
+ apr_getopt_init(&opts, pool, argc, argv);
+ opts->interleave = 1;
+ while (1)
+ {
+ int opt;
+ const char *arg;
+ const char *opt_arg;
+
+ apr_status_t status = apr_getopt_long(opts, options, &opt, &arg);
+ if (APR_STATUS_IS_EOF(status))
+ break;
+ if (status != APR_SUCCESS)
+ return svn_error_wrap_apr(status, "getopt failure");
+ switch(opt)
+ {
+ case 'v':
+ quiet = FALSE;
+ break;
+ case 'q':
+ quiet = TRUE;
+ break;
+ case 'm':
+ SVN_ERR(svn_utf_cstring_to_utf8(&message, arg, pool));
+ break;
+ case 'F':
+ {
+ const char *filename;
+ SVN_ERR(svn_utf_cstring_to_utf8(&filename, arg, pool));
+ SVN_ERR(svn_stringbuf_from_file2(&filedata, filename, pool));
+ }
+ break;
+ case 'u':
+ username = apr_pstrdup(pool, arg);
+ break;
+ case 'p':
+ password = apr_pstrdup(pool, arg);
+ break;
+ case auth_password_from_stdin_opt:
+ read_pass_from_stdin = TRUE;
+ break;
+ case 'U':
+ SVN_ERR(svn_utf_cstring_to_utf8(&anchor_url, arg, pool));
+ if (! svn_path_is_url(anchor_url))
+ return svn_error_createf(SVN_ERR_INCORRECT_PARAMS, NULL,
+ "'%s' is not a URL", anchor_url);
+ anchor_url = sanitize_url(anchor_url, pool);
+ break;
+ case 'r':
+ {
+ const char *saved_arg = arg;
+ char *digits_end = NULL;
+ while (*arg == 'r')
+ arg++;
+ base_revision = strtol(arg, &digits_end, 10);
+ if ((! SVN_IS_VALID_REVNUM(base_revision))
+ || (! digits_end)
+ || *digits_end)
+ return svn_error_createf(SVN_ERR_CL_ARG_PARSING_ERROR, NULL,
+ _("Invalid revision number '%s'"),
+ saved_arg);
+ }
+ break;
+ case 'B':
+ branch_id = (arg[0] == 'B') ? apr_pstrdup(pool, arg)
+ : apr_psprintf(pool, "B%s", arg);
+ break;
+ case with_revprop_opt:
+ SVN_ERR(svn_opt_parse_revprop(&revprops, arg, pool));
+ break;
+ case 'X':
+ SVN_ERR(svn_utf_cstring_to_utf8(&extra_args_file, arg, pool));
+ break;
+ case non_interactive_opt:
+ non_interactive = TRUE;
+ break;
+ case force_interactive_opt:
+ force_interactive = TRUE;
+ break;
+ case trust_server_cert_opt:
+ trust_unknown_ca = TRUE;
+ break;
+ case trust_server_cert_failures_opt:
+ SVN_ERR(svn_utf_cstring_to_utf8(&opt_arg, arg, pool));
+ SVN_ERR(svn_cmdline__parse_trust_options(
+ &trust_unknown_ca,
+ &trust_cn_mismatch,
+ &trust_expired,
+ &trust_not_yet_valid,
+ &trust_other_failure,
+ opt_arg, pool));
+ break;
+ case config_dir_opt:
+ SVN_ERR(svn_utf_cstring_to_utf8(&config_dir, arg, pool));
+ break;
+ case config_inline_opt:
+ SVN_ERR(svn_utf_cstring_to_utf8(&opt_arg, arg, pool));
+ SVN_ERR(svn_cmdline__parse_config_option(config_options, opt_arg,
+ "svnmover: ", pool));
+ break;
+ case no_auth_cache_opt:
+ no_auth_cache = TRUE;
+ break;
+ case version_opt:
+ show_version = TRUE;
+ break;
+ case ui_opt:
+ SVN_ERR(svn_utf_cstring_to_utf8(&opt_arg, arg, pool));
+ SVN_ERR(svn_token__from_word_err(&the_ui_mode, ui_mode_map, opt_arg));
+ break;
+ case colour_opt:
+ if (strcmp(arg, "always") == 0)
+ coloured_output = svn_tristate_true;
+ else if (strcmp(arg, "never") == 0)
+ coloured_output = svn_tristate_false;
+ else if (strcmp(arg, "auto") == 0)
+ coloured_output = svn_tristate_unknown;
+ else
+ return svn_error_createf(SVN_ERR_CL_ARG_PARSING_ERROR, NULL,
+ _("Bad argument in '--colour=%s': "
+ "use one of 'always', 'never', 'auto'"),
+ arg);
+ break;
+ case 'h':
+ case '?':
+ usage(stdout, pool);
+ return SVN_NO_ERROR;
+ }
+ }
+
+ if (show_version)
+ {
+ SVN_ERR(display_version(opts, quiet, pool));
+ return SVN_NO_ERROR;
+ }
+
+ if (coloured_output == svn_tristate_true)
+ use_coloured_output = TRUE;
+ else if (coloured_output == svn_tristate_false)
+ use_coloured_output = FALSE;
+ else
+ use_coloured_output = (svn_cmdline__stdout_is_a_terminal()
+ && svn_cmdline__stderr_is_a_terminal());
+
+ if (non_interactive && force_interactive)
+ {
+ return svn_error_create(SVN_ERR_CL_ARG_PARSING_ERROR, NULL,
+ _("--non-interactive and --force-interactive "
+ "are mutually exclusive"));
+ }
+ else
+ non_interactive = !svn_cmdline__be_interactive(non_interactive,
+ force_interactive);
+
+ if (!non_interactive)
+ {
+ if (trust_unknown_ca || trust_cn_mismatch || trust_expired
+ || trust_not_yet_valid || trust_other_failure)
+ return svn_error_create(SVN_ERR_CL_ARG_PARSING_ERROR, NULL,
+ _("--trust-server-cert-failures requires "
+ "--non-interactive"));
+ }
+
+ /* --password-from-stdin can only be used with --non-interactive */
+ if (read_pass_from_stdin && !non_interactive)
+ {
+ return svn_error_create(SVN_ERR_CL_ARG_PARSING_ERROR, NULL,
+ _("--password-from-stdin requires "
+ "--non-interactive"));
+ }
+
+ /* Now initialize the client context */
+
+ err = svn_config_get_config(&cfg_hash, config_dir, pool);
+ if (err)
+ {
+ /* Fallback to default config if the config directory isn't readable
+ or is not a directory. */
+ if (APR_STATUS_IS_EACCES(err->apr_err)
+ || SVN__APR_STATUS_IS_ENOTDIR(err->apr_err))
+ {
+ svn_handle_warning2(stderr, err, "svnmover: ");
+ svn_error_clear(err);
+
+ SVN_ERR(svn_config__get_default_config(&cfg_hash, pool));
+ }
+ else
+ return err;
+ }
+
+ if (config_options)
+ {
+ svn_error_clear(
+ svn_cmdline__apply_config_options(cfg_hash, config_options,
+ "svnmover: ", "--config-option"));
+ }
+
+ /* Get password from stdin if necessary */
+ if (read_pass_from_stdin)
+ {
+ SVN_ERR(svn_cmdline__stdin_readline(&password, pool, pool));
+ }
+
+ SVN_ERR(svn_client_create_context2(&ctx, cfg_hash, pool));
+
+ cfg_config = svn_hash_gets(cfg_hash, SVN_CONFIG_CATEGORY_CONFIG);
+ SVN_ERR(svn_cmdline_create_auth_baton2(&ctx->auth_baton,
+ non_interactive,
+ username,
+ password,
+ config_dir,
+ no_auth_cache,
+ trust_unknown_ca,
+ trust_cn_mismatch,
+ trust_expired,
+ trust_not_yet_valid,
+ trust_other_failure,
+ cfg_config,
+ ctx->cancel_func,
+ ctx->cancel_baton,
+ pool));
+
+ /* Get the commit log message */
+ SVN_ERR(get_log_message(&log_msg, message, revprops, filedata,
+ pool, pool));
+
+ /* Put the log message in the list of revprops, and check that the user
+ did not try to supply any other "svn:*" revprops. */
+ if (svn_prop_has_svn_prop(revprops, pool))
+ return svn_error_create(SVN_ERR_CLIENT_PROPERTY_NAME, NULL,
+ _("Standard properties can't be set "
+ "explicitly as revision properties"));
+ if (log_msg)
+ {
+ svn_hash_sets(revprops, SVN_PROP_REVISION_LOG,
+ svn_string_create(log_msg, pool));
+ }
+
+ /* Help command: if given before any actions, then display full help
+ (and ANCHOR_URL need not have been provided). */
+ if (opts->ind < opts->argc && strcmp(opts->argv[opts->ind], "help") == 0)
+ {
+ usage(stdout, pool);
+ return SVN_NO_ERROR;
+ }
+
+ if (!anchor_url)
+ return svn_error_createf(SVN_ERR_INCORRECT_PARAMS, NULL,
+ "--root-url (-U) not provided");
+
+ /* Copy the rest of our command-line arguments to an array,
+ UTF-8-ing them along the way. */
+ /* If there are extra arguments in a supplementary file, tack those
+ on, too (again, in UTF8 form). */
+ action_args = apr_array_make(pool, opts->argc, sizeof(const char *));
+ if (extra_args_file)
+ {
+ svn_stringbuf_t *contents, *contents_utf8;
+
+ SVN_ERR(svn_stringbuf_from_file2(&contents, extra_args_file, pool));
+ SVN_ERR(svn_utf_stringbuf_to_utf8(&contents_utf8, contents, pool));
+ svn_cstring_split_append(action_args, contents_utf8->data, "\n\r",
+ FALSE, pool);
+ }
+
+ interactive_actions = !(opts->ind < opts->argc
+ || extra_args_file
+ || non_interactive);
+
+ if (interactive_actions)
+ {
+#ifdef HAVE_LINENOISE
+ linenoiseSetCompletionCallback(linenoise_completion);
+#endif
+ }
+
+ SVN_ERR(wc_create(&wc,
+ anchor_url, base_revision,
+ branch_id,
+ ctx, pool, pool));
+
+ do
+ {
+ /* Parse arguments -- converting local style to internal style,
+ * repos-relative URLs to regular URLs, etc. */
+ err = svn_client_args_to_target_array2(&action_args, opts, action_args,
+ ctx, FALSE, pool);
+ if (! err)
+ err = parse_actions(&actions, action_args, pool);
+ if (! err)
+ err = execute(wc, actions, anchor_url, revprops, ctx, pool);
+ if (err)
+ {
+ if (err->apr_err == SVN_ERR_AUTHN_FAILED && non_interactive)
+ err = svn_error_quick_wrap(err,
+ _("Authentication failed and interactive"
+ " prompting is disabled; see the"
+ " --force-interactive option"));
+ if (interactive_actions)
+ {
+ /* Display the error, but don't quit */
+ settext_stderr(TEXT_FG_RED);
+ svn_handle_error2(err, stderr, FALSE, "svnmover: ");
+ settext_stderr(TEXT_RESET);
+ svn_error_clear(err);
+ }
+ else
+ SVN_ERR(err);
+ }
+
+ /* Possibly read more actions from the command line */
+ if (interactive_actions)
+ {
+ SVN_ERR(read_words(&action_args, "svnmover> ", pool));
+ }
+ }
+ while (interactive_actions && action_args);
+
+ /* Final commit */
+ err = commit(NULL, wc, revprops, pool);
+ svn_pool_destroy(wc->pool);
+ SVN_ERR(err);
+
+ return SVN_NO_ERROR;
+}
+
+int
+main(int argc, const char *argv[])
+{
+ apr_pool_t *pool;
+ int exit_code = EXIT_SUCCESS;
+ svn_error_t *err;
+
+ /* Initialize the app. */
+ if (svn_cmdline_init("svnmover", stderr) != EXIT_SUCCESS)
+ return EXIT_FAILURE;
+
+ /* Create our top-level pool. Use a separate mutexless allocator,
+ * given this application is single threaded.
+ */
+ pool = apr_allocator_owner_get(svn_pool_create_allocator(FALSE));
+
+ svn_error_set_malfunction_handler(svn_error_raise_on_malfunction);
+
+ err = sub_main(&exit_code, argc, argv, pool);
+
+ /* Flush stdout and report if it fails. It would be flushed on exit anyway
+ but this makes sure that output is not silently lost if it fails. */
+ err = svn_error_compose_create(err, svn_cmdline_fflush(stdout));
+
+ if (err)
+ {
+ exit_code = EXIT_FAILURE;
+ settext_stderr(TEXT_FG_RED);
+ svn_cmdline_handle_exit_error(err, NULL, "svnmover: ");
+ settext_stderr(TEXT_RESET);
+ }
+
+ svn_pool_destroy(pool);
+ return exit_code;
+}
diff --git a/tools/dev/svnmover/svnmover.h b/tools/dev/svnmover/svnmover.h
new file mode 100644
index 0000000..a2d8424
--- /dev/null
+++ b/tools/dev/svnmover/svnmover.h
@@ -0,0 +1,295 @@
+/**
+ * @copyright
+ * ====================================================================
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ * ====================================================================
+ * @endcopyright
+ *
+ * @file svnmover.h
+ * @brief Concept Demo for Move Tracking and Branching
+ */
+
+#ifndef SVNMOVER_H
+#define SVNMOVER_H
+
+#include "svn_types.h"
+#include "svn_client.h"
+#include "svn_ra.h"
+
+#include "private/svn_branch.h"
+#include "private/svn_branch_compat.h"
+
+/* Decide whether to use the 'linenoise' library for command-line input
+ editing and completion. */
+#ifndef WIN32
+#define HAVE_LINENOISE
+#endif
+
+
+#ifdef __cplusplus
+extern "C" {
+#endif /* __cplusplus */
+
+
+/* Like apr_hash_overlay() and apr_hash_merge() except allocating the
+ * result in the pool of the first input hash (OVERLAY and H1 respectively).
+ *
+ * When APR pool debugging is enabled, these functions require that the
+ * result pool does not have greater lifetime than the inputs, so passing
+ * an arbitrary result pool doesn't work well.
+ *
+ * If the second hash's pool has a shorter lifetime than that of the first,
+ * you're out of luck.
+ */
+#define hash_overlay(overlay, base) \
+ apr_hash_overlay(apr_hash_pool_get(overlay), overlay, base)
+#define hash_merge(overlay, h1) \
+ apr_hash_merge(apr_hash_pool_get(overlay), h1, h2, merger, data)
+
+
+enum { UI_MODE_EIDS, UI_MODE_PATHS, UI_MODE_SERIAL };
+extern int the_ui_mode;
+
+
+/* Display PROMPT_STR, read a line of text, and set *RESULT to that line.
+ *
+ * The interface here is similar to svn_cmdline_prompt_user2().
+ */
+svn_error_t *
+svnmover_prompt_user(const char **result,
+ const char *prompt_str,
+ apr_pool_t *pool);
+
+/* Print a notification. */
+__attribute__((format(printf, 1, 2)))
+void
+svnmover_notify(const char *fmt,
+ ...);
+
+/* Print a verbose notification: in 'quiet' mode, don't print it. */
+__attribute__((format(printf, 1, 2)))
+void
+svnmover_notify_v(const char *fmt,
+ ...);
+
+typedef struct svnmover_wc_version_t
+{
+ svn_revnum_t revision; /* always SVN_INVALID_REVNUM in working version */
+ svn_branch__state_t *branch;
+} svnmover_wc_version_t;
+
+/* Return (left, right) pairs of element content that differ between
+ * LEFT and RIGHT.
+ *
+ * Examine only the elements listed in ELEMENTS, a hash of (eid ->
+ * [anything]). If ELEMENTS is NULL, use the union of LEFT and RIGHT.
+ *
+ * LEFT and/or RIGHT may be null, meaning an empty set of elements.
+ *
+ * Set *DIFF_P to a hash of (eid -> (svn_element__content_t *)[2]).
+ */
+svn_error_t *
+svnmover_element_differences(apr_hash_t **diff_p,
+ const svn_element__tree_t *left,
+ const svn_element__tree_t *right,
+ apr_hash_t *elements,
+ apr_pool_t *result_pool,
+ apr_pool_t *scratch_pool);
+
+/* */
+typedef struct conflict_storage_t conflict_storage_t;
+
+typedef struct svnmover_wc_t
+{
+ apr_pool_t *pool;
+ const char *repos_root_url;
+ /*const char *anchor_repos_relpath;*/
+ svn_revnum_t head_revision;
+
+ svn_ra_session_t *ra_session;
+ svn_branch__txn_t *edit_txn;
+ conflict_storage_t *conflicts;
+
+ /* The base revisions, for (at least) all EIDs in BASE:
+ branch_id -> hash { eid -> revnum } */
+ apr_hash_t *base_revs;
+
+ /* Base and working versions. */
+ svnmover_wc_version_t *base, *working;
+
+  /* Textual list of the commands that were executed, suitable
+     for putting in a log message */
+ const char *list_of_commands;
+
+ svn_client_ctx_t *ctx;
+
+} svnmover_wc_t;
+
+struct conflict_storage_t
+{
+ svn_branch__state_t *yca_branch, *src_branch, *tgt_branch, *merged_branch;
+
+ /* Single-element conflicts */
+ /* (eid -> element_merge3_conflict_t) */
+ apr_hash_t *element_merge_conflicts;
+
+ /* Name-clash conflicts */
+ /* ("%{parent_eid}d/%{name}s" -> name_clash_conflict_t) */
+ apr_hash_t *name_clash_conflicts;
+
+ /* Cycle conflicts */
+ /* (eid -> cycle_conflict_t) */
+ apr_hash_t *cycle_conflicts;
+
+ /* Orphan conflicts */
+ /* (eid -> orphan_conflict_t) */
+ apr_hash_t *orphan_conflicts;
+};
+
+/* Three-way-merge the changes from YCA to SRC and YCA to TGT. YCA is
+ * notionally the youngest common ancestor of SRC and TGT.
+ *
+ * The elements to merge are the union of the elements in the three input
+ * subtrees (SRC, TGT, YCA). For each such element, merge the two changes:
+ * YCA -> SRC and YCA -> TGT, applying the result to EDIT_TXN:EDIT_BRANCH.
+ *
+ * If conflicts arise, return them in *CONFLICT_STORAGE_P; otherwise set
+ * that to null.
+ *
+ * SRC, TGT and YCA must be existing and corresponding (same EID) elements.
+ *
+ * None of SRC, TGT and YCA is a subbranch root element.
+ *
+ * Nested subbranches will also be merged.
+ */
+svn_error_t *
+svnmover_branch_merge(svn_branch__txn_t *edit_txn,
+ svn_branch__state_t *edit_branch,
+ conflict_storage_t **conflict_storage_p,
+ svn_branch__el_rev_id_t *src,
+ svn_branch__el_rev_id_t *tgt,
+ svn_branch__el_rev_id_t *yca,
+ apr_pool_t *result_pool,
+ apr_pool_t *scratch_pool);
+
+/* */
+svn_error_t *
+svnmover_display_conflicts(conflict_storage_t *conflict_storage,
+ apr_pool_t *scratch_pool);
+
+svn_error_t *
+svnmover_conflict_resolved(conflict_storage_t *conflicts,
+ const char *id_string,
+ apr_pool_t *scratch_pool);
+
+/* */
+svn_boolean_t
+svnmover_any_conflicts(const conflict_storage_t *conflicts);
+
+/* Load branching info.
+ */
+svn_error_t *
+svn_ra_load_branching_state(svn_branch__txn_t **branching_txn_p,
+ svn_branch__compat_fetch_func_t *fetch_func,
+ void **fetch_baton,
+ svn_ra_session_t *session,
+ const char *branch_info_dir,
+ svn_revnum_t base_revision,
+ apr_pool_t *result_pool,
+ apr_pool_t *scratch_pool);
+
+/* Ev3 version of svn_ra_get_commit_editor().
+ *
+ * If BRANCH_INFO_DIR is non-null, store branching info in that local
+ * directory, otherwise store branching info in revprops.
+ */
+svn_error_t *
+svn_ra_get_commit_txn(svn_ra_session_t *session,
+ svn_branch__txn_t **edit_txn_p,
+ apr_hash_t *revprop_table,
+ svn_commit_callback2_t commit_callback,
+ void *commit_baton,
+ apr_hash_t *lock_tokens,
+ svn_boolean_t keep_locks,
+ const char *branch_info_dir,
+ apr_pool_t *pool);
+
+/** Describes a server-side move (really a copy+delete within the same
+ * revision) which has been identified by scanning the revision log.
+ */
+typedef struct svn_repos_move_info_t {
+ /* The repository relpath the node was moved from. */
+ const char *moved_from_repos_relpath;
+
+ /* The repository relpath the node was moved to. */
+ const char *moved_to_repos_relpath;
+
+ /* The revision in which the move happened. */
+ svn_revnum_t revision;
+
+ /* The copyfrom revision of the moved-to path. */
+ svn_revnum_t copyfrom_rev;
+
+ /* Pointers to previous or subsequent moves of the same node
+ * within interesting history. */
+ struct svn_repos_move_info_t *prev;
+ struct svn_repos_move_info_t *next;
+
+  /* @note Fields may be added to the end of this structure in future
+   * versions. Therefore, to preserve binary compatibility, users
+   * should not directly allocate structures of this type but should use
+   * svn_repos_move_info_create(). */
+} svn_repos_move_info_t;
+
+/** Create a svn_repos_move_info_t structure.
+ * @see svn_repos_move_info_t
+ */
+svn_repos_move_info_t *
+svn_repos_move_info_create(const char *moved_from_repos_relpath,
+ const char *moved_to_repos_relpath,
+ svn_revnum_t revision,
+ svn_revnum_t copyfrom_rev,
+ svn_repos_move_info_t *prev,
+ svn_repos_move_info_t *next,
+ apr_pool_t *result_pool);
+
+/* ...
+ */
+const char *
+svn_client__format_move_chain_for_display(svn_repos_move_info_t *first_move,
+ const char *indent,
+ apr_pool_t *result_pool);
+/* ...
+ */
+svn_error_t *
+svn_client__get_repos_moves(apr_hash_t **moves,
+ const char *anchor_abspath,
+ svn_ra_session_t *ra_session,
+ svn_revnum_t start,
+ svn_revnum_t end,
+ svn_client_ctx_t *ctx,
+ apr_pool_t *result_pool,
+ apr_pool_t *scratch_pool);
+
+
+#ifdef __cplusplus
+}
+#endif /* __cplusplus */
+
+#endif /* SVNMOVER_H */
+
diff --git a/tools/dev/svnmover/util.c b/tools/dev/svnmover/util.c
new file mode 100644
index 0000000..1ee018f
--- /dev/null
+++ b/tools/dev/svnmover/util.c
@@ -0,0 +1,59 @@
+/*
+ * util.c: Utility functions for 'svnmover'
+ *
+ * ====================================================================
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ * ====================================================================
+ */
+
+#include "svnmover.h"
+
+#ifdef HAVE_LINENOISE
+#include "linenoise/linenoise.c"
+#else
+#include "svn_cmdline.h"
+#endif
+
+
+svn_error_t *
+svnmover_prompt_user(const char **result,
+ const char *prompt_str,
+ apr_pool_t *pool)
+{
+#ifdef HAVE_LINENOISE
+ char *input;
+
+ input = linenoise(prompt_str);
+ if (! input)
+ {
+ return svn_error_create(SVN_ERR_CANCELLED, NULL, NULL);
+ }
+ /* add the line to the recallable history (if non-empty) */
+ if (input && *input)
+ {
+ linenoiseHistoryAdd(input);
+ }
+ *result = apr_pstrdup(pool, input);
+ free(input);
+#else
+ SVN_ERR(svn_cmdline_prompt_user2(result, prompt_str, NULL, pool));
+#endif
+ return SVN_NO_ERROR;
+}
+
+
diff --git a/tools/dev/svnqlite3-dump b/tools/dev/svnqlite3-dump
new file mode 100755
index 0000000..1b94f38
--- /dev/null
+++ b/tools/dev/svnqlite3-dump
@@ -0,0 +1,50 @@
+#!/usr/bin/perl -lpw
+
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements. See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership. The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied. See the License for the
+# specific language governing permissions and limitations
+# under the License.
+
+# USAGE:
+# sqlite3 .svn/wc.db .dump | $0
+# $0 /path/to/wc
+# $0 /path/to/wc/.svn/wc.db
+# DOES:
+# decodes blobs (eg, property skels) and dates to human-readable form
+# REQUIRES:
+# sqlite3(1) (second and third usage forms only)
+
+BEGIN {
+ # locate sqlite3
+ my $sqlite3 = $ENV{SQLITE3} || "sqlite3";
+ # set stdin
+ my $file = shift;
+ $file = "." if -t and not $file;
+ if ($file) {
+ $file .= "/.svn/wc.db" if -e "$file/.svn/wc.db";
+ close STDIN;
+ open STDIN, "-|", $sqlite3, $file, '.dump';
+ } else {
+ # filter stdin to stdout
+ }
+}
+
+# X'68656C6C6F' => "hello"
+1 while s/X'([0-9A-F]{2})/chr(hex $1) . q[X']/e;
+s/X''//g;
+s/\n/\\n/g; # multiline props
+
+# 1288312835000000 => "Fri Oct 29 02:40:35 2010"
+s/(?<=,)(\d\d\d\d\d\d\d\d\d\d)\d\d\d\d\d\d(?=,)/sprintf '"%s"', scalar localtime $1/eg;
diff --git a/tools/dev/svnraisetreeconflict/svnraisetreeconflict.c b/tools/dev/svnraisetreeconflict/svnraisetreeconflict.c
new file mode 100644
index 0000000..65825d5
--- /dev/null
+++ b/tools/dev/svnraisetreeconflict/svnraisetreeconflict.c
@@ -0,0 +1,415 @@
+/* svnraisetreeconflict
+ *
+ * This is a crude command line tool that publishes API to create
+ * tree-conflict markings in a working copy.
+ *
+ * To compile this, go to the root of the Subversion source tree and
+ * call `make svnraisetreeconflict'. You will find the executable file
+ * next to this source file.
+ *
+ * If you want to "install" svnraisetreeconflict, you may call
+ * `make install-tools' in the Subversion source tree root.
+ * (Note: This also installs any other installable tools.)
+ *
+ * svnraisetreeconflict cannot be compiled separate from a Subversion
+ * source tree.
+ *
+ * ====================================================================
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ * ====================================================================
+ */
+
+#include "svn_cmdline.h"
+#include "svn_pools.h"
+#include "svn_wc.h"
+#include "svn_utf.h"
+#include "svn_path.h"
+#include "svn_opt.h"
+#include "svn_version.h"
+
+#include "private/svn_wc_private.h"
+#include "private/svn_cmdline_private.h"
+
+#include "svn_private_config.h"
+
+#define OPT_VERSION SVN_OPT_FIRST_LONGOPT_ID
+
+static svn_error_t *
+version(apr_pool_t *pool)
+{
+ return svn_opt_print_help4(NULL, "svnraisetreeconflict", TRUE, FALSE, FALSE,
+ NULL, NULL, NULL, NULL, NULL, NULL, pool);
+}
+
+static void
+usage(apr_pool_t *pool)
+{
+ svn_error_clear(svn_cmdline_fprintf
+ (stderr, pool,
+ _("Type 'svnraisetreeconflict --help' for usage.\n")));
+}
+
+/***************************************************************************
+ * "enum mapping" functions copied from subversion/libsvn_wc/tree_conflicts.c
+ **************************************************************************/
+
+/* A mapping between a string STR and an enumeration value VAL. */
+typedef struct enum_mapping_t
+{
+ const char *str;
+ int val;
+} enum_mapping_t;
+
+/* A map for svn_node_kind_t values. */
+static const enum_mapping_t node_kind_map[] =
+{
+ { "none", svn_node_none },
+ { "file", svn_node_file },
+ { "dir", svn_node_dir },
+ { "unknown", svn_node_unknown },
+ { NULL, 0 }
+};
+
+/* A map for svn_wc_operation_t values. */
+static const enum_mapping_t operation_map[] =
+{
+ { "update", svn_wc_operation_update },
+ { "switch", svn_wc_operation_switch },
+ { "merge", svn_wc_operation_merge },
+ { NULL, 0 }
+};
+
+/* A map for svn_wc_conflict_action_t values. */
+static const enum_mapping_t action_map[] =
+{
+ { "edit", svn_wc_conflict_action_edit },
+ { "delete", svn_wc_conflict_action_delete },
+ { "add", svn_wc_conflict_action_add },
+ { NULL, 0 }
+};
+
+/* A map for svn_wc_conflict_reason_t values. */
+static const enum_mapping_t reason_map[] =
+{
+ { "edited", svn_wc_conflict_reason_edited },
+ { "deleted", svn_wc_conflict_reason_deleted },
+ { "missing", svn_wc_conflict_reason_missing },
+ { "obstructed", svn_wc_conflict_reason_obstructed },
+ { "added", svn_wc_conflict_reason_added },
+ { NULL, 0 }
+};
+
+/* Parse the enumeration value named by STR into *RESULT as a plain
+ * 'int', using MAP to convert from strings to enumeration values.
+ * In MAP, a null STR field marks the end of the map.
+ * Return an SVN_ERR_CL_ARG_PARSING_ERROR error if STR does not match
+ * any entry in MAP.
+ */
+static svn_error_t *
+read_enum_field(int *result,
+ const enum_mapping_t *map,
+ const char *str,
+ apr_pool_t *pool)
+{
+ int i;
+
+ /* Find STR in MAP; error if not found. */
+ for (i = 0; ; i++)
+ {
+ if (map[i].str == NULL)
+ return svn_error_createf(SVN_ERR_CL_ARG_PARSING_ERROR, NULL,
+ "Unrecognised parameter value: '%s'", str);
+ if (strcmp(str, map[i].str) == 0)
+ break;
+ }
+
+ *result = map[i].val;
+ return SVN_NO_ERROR;
+}
+
+static const char*
+get_enum_str(const enum_mapping_t *map,
+ int enum_val)
+{
+ int i;
+ for (i = 0; map[i].str != NULL; i++)
+ {
+ if (map[i].val == enum_val)
+ return map[i].str;
+ }
+ return NULL;
+}
+
+static void
+print_enum_map(const enum_mapping_t *map,
+ apr_pool_t *pool)
+{
+ int i;
+ for (i = 0; map[i].str != NULL; i++)
+ svn_error_clear(svn_cmdline_fprintf(stdout, pool,
+ " %s", map[i].str));
+}
+
+static svn_error_t *
+raise_tree_conflict(int argc, const char **argv, apr_pool_t *pool)
+{
+ int i = 0;
+ svn_wc_conflict_version_t *left, *right;
+ svn_wc_conflict_description2_t *c;
+ svn_wc_context_t *wc_ctx;
+
+ /* Conflict description parameters */
+ const char *wc_path, *wc_abspath;
+ const char *repos_url1, *repos_url2, *path_in_repos1, *path_in_repos2;
+ int operation, action, reason;
+ long peg_rev1, peg_rev2;
+ int kind, kind1, kind2;
+
+ if (argc != 13)
+ return svn_error_create(SVN_ERR_CL_ARG_PARSING_ERROR, NULL,
+ "Wrong number of arguments");
+
+ /* Read the parameters */
+ wc_path = svn_dirent_internal_style(argv[i++], pool);
+ SVN_ERR(read_enum_field(&kind, node_kind_map, argv[i++], pool));
+ SVN_ERR(read_enum_field(&operation, operation_map, argv[i++], pool));
+ SVN_ERR(read_enum_field(&action, action_map, argv[i++], pool));
+ SVN_ERR(read_enum_field(&reason, reason_map, argv[i++], pool));
+ repos_url1 = argv[i++];
+ path_in_repos1 = argv[i++];
+ peg_rev1 = atol(argv[i++]);
+ SVN_ERR(read_enum_field(&kind1, node_kind_map, argv[i++], pool));
+ repos_url2 = argv[i++];
+ path_in_repos2 = argv[i++];
+ peg_rev2 = atol(argv[i++]);
+ SVN_ERR(read_enum_field(&kind2, node_kind_map, argv[i++], pool));
+
+
+ /* Allocate and fill in the description data structures */
+ SVN_ERR(svn_dirent_get_absolute(&wc_abspath, wc_path, pool));
+ left = svn_wc_conflict_version_create2(repos_url1, NULL, path_in_repos1,
+ peg_rev1, kind1, pool);
+ right = svn_wc_conflict_version_create2(repos_url2, NULL, path_in_repos2,
+ peg_rev2, kind2, pool);
+ c = svn_wc_conflict_description_create_tree2(wc_abspath, kind,
+ operation, left, right, pool);
+ c->action = (svn_wc_conflict_action_t)action;
+ c->reason = (svn_wc_conflict_reason_t)reason;
+
+ /* Raise the conflict */
+ SVN_ERR(svn_wc_context_create(&wc_ctx, NULL, pool, pool));
+ SVN_ERR(svn_wc__add_tree_conflict(wc_ctx, c, pool));
+
+ return SVN_NO_ERROR;
+}
+
+
+static void
+help(const apr_getopt_option_t *options, apr_pool_t *pool)
+{
+ svn_error_clear
+ (svn_cmdline_fprintf
+ (stdout, pool,
+ _("usage: svnraisetreeconflict [OPTIONS] WC_PATH NODE_KIND OPERATION ACTION REASON REPOS_URL1 PATH_IN_REPOS1 PEG_REV1 NODE_KIND1 REPOS_URL2 PATH_IN_REPOS2 PEG_REV2 NODE_KIND2\n\n"
+ " Mark the working-copy node WC_PATH as being the victim of a tree conflict.\n"
+ "\n"
+ " WC_PATH's parent directory must be a working copy, otherwise a\n"
+ " tree conflict cannot be raised.\n"
+ "\n"
+ "Valid options:\n")));
+ while (options->description)
+ {
+ const char *optstr;
+ svn_opt_format_option(&optstr, options, TRUE, pool);
+ svn_error_clear(svn_cmdline_fprintf(stdout, pool, " %s\n", optstr));
+ ++options;
+ }
+ svn_error_clear(svn_cmdline_fprintf(stdout, pool,
+ _("\n"
+ "Valid enum argument values:\n"
+ " NODE_KIND, NODE_KIND1, NODE_KIND2:\n"
+ " ")));
+ print_enum_map(node_kind_map, pool);
+ svn_error_clear(svn_cmdline_fprintf(stdout, pool,
+ _("\n"
+ " OPERATION:\n"
+ " ")));
+ print_enum_map(operation_map, pool);
+ svn_error_clear(svn_cmdline_fprintf(stdout, pool,
+ _("\n"
+ " ACTION (what svn tried to do):\n"
+ " ")));
+ print_enum_map(action_map, pool);
+ svn_error_clear(svn_cmdline_fprintf(stdout, pool,
+ _("\n"
+ " REASON (what local change made svn fail):\n"
+ " ")));
+ print_enum_map(reason_map, pool);
+ svn_error_clear(svn_cmdline_fprintf(stdout, pool,
+ _("\n"
+ " REPOS_URL1, REPOS_URL2:\n"
+ " The URL of the repository itself, e.g.: file://usr/repos\n"
+ " PATH_IN_REPOS1, PATH_IN_REPOS2:\n"
+ " The complete path of the node in the repository, e.g.: sub/dir/foo\n"
+ " PEG_REV1, PEG_REV2:\n"
+ " The revision number at which the given path is relevant.\n"
+ "\n"
+ "Example:\n"
+ " svnraisetreeconflict ./foo %s %s %s %s file://usr/repos sub/dir/foo 1 %s file://usr/repos sub/dir/foo 3 %s\n\n"),
+ get_enum_str(node_kind_map, svn_node_file),
+ get_enum_str(operation_map, svn_wc_operation_update),
+ get_enum_str(action_map, svn_wc_conflict_action_delete),
+ get_enum_str(reason_map, svn_wc_conflict_reason_deleted),
+ get_enum_str(node_kind_map, svn_node_file),
+ get_enum_str(node_kind_map, svn_node_none)
+ ));
+}
+
+
+/* Version compatibility check */
+static svn_error_t *
+check_lib_versions(void)
+{
+ static const svn_version_checklist_t checklist[] =
+ {
+ { "svn_subr", svn_subr_version },
+ { "svn_wc", svn_wc_version },
+ { NULL, NULL }
+ };
+ SVN_VERSION_DEFINE(my_version);
+
+ return svn_ver_check_list2(&my_version, checklist, svn_ver_equal);
+}
+
+/*
+ * On success, leave *EXIT_CODE untouched and return SVN_NO_ERROR. On error,
+ * either return an error to be displayed, or set *EXIT_CODE to non-zero and
+ * return SVN_NO_ERROR.
+ */
+static svn_error_t *
+sub_main(int *exit_code, int argc, const char *argv[], apr_pool_t *pool)
+{
+ apr_getopt_t *os;
+ const apr_getopt_option_t options[] =
+ {
+ {"help", 'h', 0, N_("display this help")},
+ {"version", OPT_VERSION, 0,
+ N_("show program version information")},
+ {0, 0, 0, 0}
+ };
+ apr_array_header_t *remaining_argv;
+
+ /* Check library versions */
+ SVN_ERR(check_lib_versions());
+
+#if defined(WIN32) || defined(__CYGWIN__)
+ /* Set the working copy administrative directory name. */
+ if (getenv("SVN_ASP_DOT_NET_HACK"))
+ {
+ SVN_ERR(svn_wc_set_adm_dir("_svn", pool));
+ }
+#endif
+
+ SVN_ERR(svn_cmdline__getopt_init(&os, argc, argv, pool));
+
+ os->interleave = 1;
+ while (1)
+ {
+ int opt;
+ const char *arg;
+ apr_status_t status = apr_getopt_long(os, options, &opt, &arg);
+ if (APR_STATUS_IS_EOF(status))
+ break;
+ if (status != APR_SUCCESS)
+ {
+ usage(pool);
+ *exit_code = EXIT_FAILURE;
+ return SVN_NO_ERROR;
+ }
+
+ switch (opt)
+ {
+ case 'h':
+ help(options, pool);
+ return SVN_NO_ERROR;
+ case OPT_VERSION:
+ SVN_ERR(version(pool));
+ return SVN_NO_ERROR;
+ default:
+ usage(pool);
+ *exit_code = EXIT_FAILURE;
+ return SVN_NO_ERROR;
+ }
+ }
+
+ /* Convert the remaining arguments to UTF-8. */
+ remaining_argv = apr_array_make(pool, 0, sizeof(const char *));
+ while (os->ind < argc)
+ {
+ const char *s;
+
+ SVN_ERR(svn_utf_cstring_to_utf8(&s, os->argv[os->ind++], pool));
+ APR_ARRAY_PUSH(remaining_argv, const char *) = s;
+ }
+
+ if (remaining_argv->nelts < 1)
+ {
+ usage(pool);
+ *exit_code = EXIT_FAILURE;
+ return SVN_NO_ERROR;
+ }
+
+ /* Do the main task */
+ SVN_ERR(raise_tree_conflict(remaining_argv->nelts,
+ (const char **)remaining_argv->elts,
+ pool));
+
+ return SVN_NO_ERROR;
+}
+
+int
+main(int argc, const char *argv[])
+{
+ apr_pool_t *pool;
+ int exit_code = EXIT_SUCCESS;
+ svn_error_t *err;
+
+ /* Initialize the app. */
+ if (svn_cmdline_init("svnraisetreeconflict", stderr) != EXIT_SUCCESS)
+ return EXIT_FAILURE;
+
+ /* Create our top-level pool. Use a separate mutexless allocator,
+ * given this application is single threaded.
+ */
+ pool = apr_allocator_owner_get(svn_pool_create_allocator(FALSE));
+
+ err = sub_main(&exit_code, argc, argv, pool);
+
+ /* Flush stdout and report if it fails. It would be flushed on exit anyway
+ but this makes sure that output is not silently lost if it fails. */
+ err = svn_error_compose_create(err, svn_cmdline_fflush(stdout));
+
+ if (err)
+ {
+ exit_code = EXIT_FAILURE;
+ svn_cmdline_handle_exit_error(err, NULL, "svnraisetreeconflict: ");
+ }
+
+ svn_pool_destroy(pool);
+ return exit_code;
+}
diff --git a/tools/dev/trails.py b/tools/dev/trails.py
new file mode 100755
index 0000000..917d234
--- /dev/null
+++ b/tools/dev/trails.py
@@ -0,0 +1,229 @@
+#!/usr/bin/env python
+#
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements. See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership. The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied. See the License for the
+# specific language governing permissions and limitations
+# under the License.
+#
+#
+
+## See the usage() function for operating instructions. ##
+
+import re
+try:
+ # Python >=2.6
+ from functools import reduce
+except ImportError:
+ # Python <2.6
+ pass
+import sys
+import operator
+
+_re_trail = re.compile('\((?P<txn_body>[a-z_]*), (?P<filename>[a-z_\-./]*), (?P<lineno>[0-9]*), (?P<txn>0|1)\): (?P<ops>.*)')
+_re_table_op = re.compile('\(([a-z]*), ([a-z]*)\)')
+
+_separator = '------------------------------------------------------------\n'
+
def parse_trails_log(infile):
  """Parse a trail debug log from INFILE.

  Return a list of (txn_body, trail) tuples, where txn_body is a
  (function_name, filename, lineno) tuple and trail is a list of
  (operation, table) string pairs in reverse of the logged order.
  Trails that did not run inside a txn are skipped; a line that does
  not match the expected format aborts the program.
  """
  trails = []
  # enumerate() replaces the historical hand-maintained line counter.
  for lineno, line in enumerate(infile.readlines(), 1):
    m = _re_trail.match(line)

    if not m:
      sys.stderr.write('Invalid input, line %u:\n%s\n' % (lineno, line))
      sys.exit(1)

    txn = int(m.group('txn'))
    if not txn:
      ### We're not interested in trails that don't use txns at this point.
      continue

    txn_body = (m.group('txn_body'), m.group('filename'),
                int(m.group('lineno')))
    # The log records ops innermost-first; reverse to execution order.
    trail = _re_table_op.findall(m.group('ops'))
    trail.reverse()

    if not trail:
      sys.stderr.write('Warning! Empty trail at line %u:\n%s' % (lineno, line))

    trails.append((txn_body, trail))

  return trails
+
+
def output_summary(trails, outfile):
  """Write aggregate statistics (trail count, total/max/median/average
  ops per trail) for TRAILS to OUTFILE."""
  ops = sorted(len(trail) for (txn_body, trail) in trails)

  total_trails = len(ops)
  total_ops = sum(ops)
  max_ops = ops[-1]
  # Must use integer division: under Python 3 '/' yields a float, which
  # is not a valid list index (the original code raised TypeError here).
  median_ops = ops[total_trails // 2]
  average_ops = float(total_ops) / total_trails

  outfile.write(_separator)
  outfile.write('Summary\n')
  outfile.write(_separator)
  outfile.write('Total number of trails: %10i\n' % total_trails)
  outfile.write('Total number of ops: %10i\n' % total_ops)
  outfile.write('max ops/trail: %10i\n' % max_ops)
  outfile.write('median ops/trail: %10i\n' % median_ops)
  outfile.write('average ops/trail: %10.2f\n' % average_ops)
  outfile.write('\n')
+
+
+# custom compare function
+def _freqtable_cmp(a_b, c_d):
+ (a, b) = a_b
+ (c, d) = c_d
+ c = cmp(d, b)
+ if not c:
+ c = cmp(a, c)
+ return c
+
def list_frequencies(items):
  """
  Given a sequence, return a list of (item, frequency) pairs with the
  most frequent item first; equal frequencies are ordered by ascending
  item.

  The parameter was historically named 'list', which shadowed the
  builtin and made the list(counter.items()) call below raise TypeError;
  it was renamed, which is safe since all callers pass it positionally.
  """
  counter = {}
  for item in items:
    counter[item] = counter.get(item, 0) + 1

  frequencies = list(counter.items())
  # Two stable sorts give (frequency desc, item asc) without needing a
  # Python-2-style cmp function.
  frequencies.sort(key=lambda pair: pair[0])
  frequencies.sort(key=lambda pair: pair[1], reverse=True)

  return frequencies
+
+
def output_trail_length_frequencies(trails, outfile):
  """Write a table to OUTFILE showing how often each trail length
  occurs in TRAILS, with percentages of the total."""
  lengths = [len(trail) for (txn_body, trail) in trails]
  total_trails = len(lengths)

  outfile.write(_separator)
  outfile.write('Trail length frequencies\n')
  outfile.write(_separator)
  outfile.write('ops/trail   frequency   percentage\n')
  for (r, f) in list_frequencies(lengths):
    p = float(f) * 100 / total_trails
    outfile.write('%4i %6i %5.2f\n' % (r, f, p))
  outfile.write('\n')
+
+
def output_trail(outfile, trail, column=0):
  """Write TRAIL's ops to OUTFILE as a comma-separated list, wrapping
  lines at roughly 75 characters and indenting continuation lines by
  COLUMN spaces."""
  if len(trail) == 0:
    outfile.write('<empty>\n')
    return

  line = str(trail[0])
  for op in trail[1:]:
    op_str = str(op)
    if len(line) + len(op_str) > 75 - column:
      # Flush the current line and start a fresh, indented one.
      outfile.write('%s,\n' % line)
      # (was ''.join(' ' * column) -- a pointless join of a string)
      outfile.write(' ' * column)
      line = op_str
    else:
      line = line + ', ' + op_str
  outfile.write('%s\n' % line)

  outfile.write('\n')
+
+
def output_trail_frequencies(trails, outfile):
  """Write each distinct trail with its frequency to OUTFILE, most
  frequent first, labelled with the txn_body that produced it."""
  total_trails = len(trails)

  # Trails must be hashable to be counted, so convert each to a tuple.
  ttrails = []
  for (txn_body, trail) in trails:
    ttrails.append((txn_body, tuple(trail)))

  frequencies = list_frequencies(ttrails)

  outfile.write(_separator)
  outfile.write('Trail frequencies\n')
  outfile.write(_separator)
  outfile.write('frequency percentage ops/trail trail\n')
  # 'filename'/'lineno' were historically named 'file'/'line', shadowing
  # the Python 2 builtin 'file'.
  for (((txn_body, filename, lineno), trail), f) in frequencies:
    p = float(f) * 100 / total_trails
    outfile.write('-- %s - %s:%u --\n' % (txn_body, filename, lineno))
    outfile.write('%6i %5.2f %4i ' % (f, p, len(trail)))
    output_trail(outfile, trail, 37)
+
+
def output_txn_body_frequencies(trails, outfile):
  """Write each txn_body (function, file, line) with its frequency to
  OUTFILE, most frequent first."""
  bodies = []
  for (txn_body, trail) in trails:
    bodies.append(txn_body)

  total_trails = len(trails)
  frequencies = list_frequencies(bodies)

  outfile.write(_separator)
  outfile.write('txn_body frequencies\n')
  outfile.write(_separator)
  outfile.write('frequency percentage txn_body\n')
  # 'filename'/'lineno' were historically named 'file'/'line', shadowing
  # the Python 2 builtin 'file'.
  for ((txn_body, filename, lineno), f) in frequencies:
    p = float(f) * 100 / total_trails
    outfile.write('%6i %5.2f %s - %s:%u\n'
                  % (f, p, txn_body, filename, lineno))
+
+
def usage(pgm):
  """Print operating instructions for program PGM to stderr."""
  w = sys.stderr.write
  w("%s: a program for analyzing Subversion trail usage statistics.\n" % pgm)
  w("\n")
  w("Usage:\n")
  w("\n")
  # Fixed a duplicated word in the original text ("...cause it / it to...").
  w("  Compile Subversion with -DSVN_FS__TRAIL_DEBUG, which will cause\n")
  w("  it to print trail statistics to stderr. Save the stats to a file,\n")
  w("  invoke %s on the file, and ponder the output.\n" % pgm)
  w("\n")
+
+
if __name__ == '__main__':
  # At most one argument (the stats file) is accepted; with no argument
  # the stats are read from stdin.
  if len(sys.argv) > 2:
    sys.stderr.write("Error: too many arguments\n\n")
    usage(sys.argv[0])
    sys.exit(1)

  if len(sys.argv) == 1:
    infile = sys.stdin
  else:
    try:
      infile = open(sys.argv[1])
    except IOError:  # was 'except (IOError)': redundant parentheses
      sys.stderr.write("Error: unable to open '%s'\n\n" % sys.argv[1])
      usage(sys.argv[0])
      sys.exit(1)

  trails = parse_trails_log(infile)
  if infile is not sys.stdin:
    # Close the log file once fully parsed (it was previously leaked).
    infile.close()

  output_summary(trails, sys.stdout)
  output_trail_length_frequencies(trails, sys.stdout)
  output_trail_frequencies(trails, sys.stdout)
  output_txn_body_frequencies(trails, sys.stdout)
diff --git a/tools/dev/unix-build/Makefile.svn b/tools/dev/unix-build/Makefile.svn
new file mode 100644
index 0000000..2c0561c
--- /dev/null
+++ b/tools/dev/unix-build/Makefile.svn
@@ -0,0 +1,2112 @@
+# vim: noexpandtab tabstop=8 shiftwidth=8 syntax=make
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements. See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership. The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied. See the License for the
+# specific language governing permissions and limitations
+# under the License.
+#
+# WARNING: This may or may not work on your system. This Makefile is
+# an example, rather than a ready-made universal solution.
+
+# ****************************************************************
+# ** IMPORTANT NOTE FOR SVN COMMITTERS: READ THIS. **
+# ** **
+# ****************************************************************
+# | This Makefile is used by the bb-openbsd buildbot |
+# | (http://ci.apache.org/builders/bb-openbsd). Please check |
+# | the bot's health after making changes to this file. |
+# |______________________________________________________________|
+
+# Feature toggles. The Java bindings require threading support, hence
+# JAVA defaults to whatever THREADING allows.
+PERL ?= yes
+ENABLE_PERL_BINDINGS = $(PERL)
+THREADING ?= yes
+ifeq ($(THREADING),yes)
+JAVA ?= yes
+else
+JAVA ?= no
+endif
+ENABLE_JAVA_BINDINGS = $(JAVA)
+USE_APR_ICONV ?= no # set to yes to use APR iconv instead of GNU iconv
+PARALLEL ?= 1
+CLEANUP ?= 1
+EXCLUSIVE_WC_LOCKS ?= 1
+USE_HTTPV1 ?= no
+USE_AUTHZ_SHORT_CIRCUIT ?= no
+RAMDISK ?= /ramdisk
+
+PWD = $(shell pwd)
+UNAME = $(shell uname)
+# Pick the newest installed ruby, preferring an unversioned 'ruby' binary.
+RUBY = $(shell which ruby 2>/dev/null)
+ifeq ($(RUBY),)
+RUBY = $(shell which ruby24 2>/dev/null)
+ifeq ($(RUBY),)
+RUBY = $(shell which ruby23 2>/dev/null)
+ifeq ($(RUBY),)
+RUBY = $(shell which ruby22 2>/dev/null)
+ifeq ($(RUBY),)
+RUBY = $(shell which ruby21 2>/dev/null)
+ifeq ($(RUBY),)
+RUBY = $(shell which ruby20 2>/dev/null)
+ifeq ($(RUBY),)
+RUBY = $(shell which ruby19 2>/dev/null)
+ifeq ($(RUBY),)
+RUBY = $(shell which ruby18)
+endif # 1.8
+endif # 1.9
+endif # 2.0
+endif # 2.1
+endif # 2.2
+endif # 2.3
+endif # 2.4
+
+# Build either a tag or (by default) trunk of Subversion.
+TAG ?= none
+ifeq ($(TAG),none)
+BRANCH ?= trunk
+else
+BRANCH = $(TAG)
+endif
+WC ?= $(BRANCH)
+BRANCH_MAJOR = $(shell echo $(BRANCH) | \
+ sed -e 's/\([0-9]\)\.\([0-9]\)\.[x0-9].*$$/\1.\2/')
+SVN_REL_WC = svn-$(WC)
+SVN_WC = $(PWD)/$(SVN_REL_WC)
+PREFIX = $(PWD)/prefix
+SVN_PREFIX = $(PREFIX)/svn-$(WC)
+DISTDIR = $(PWD)/distfiles
+SRCDIR = $(PWD)/src
+OBJDIR = $(PWD)/objdir
+
+# Dependency versions.
+BDB_MAJOR_VER = 4.7
+BDB_VER = $(BDB_MAJOR_VER).25
+APR_VER = 1.5.2
+APR_ICONV_VER = 1.2.1
+GNU_ICONV_VER = 1.15
+APR_UTIL_VER = 1.5.4
+HTTPD_VER = 2.2.32
+NEON_VER = 0.30.2
+SERF_VER = 1.3.9
+SERF_OLD_VER = 0.3.1
+CYRUS_SASL_VER = 2.1.25
+SQLITE_VER = 3160200
+LIBMAGIC_VER = 5.30
+RUBY_VER = 2.4.2
+BZ2_VER = 1.0.6
+PYTHON_VER = 2.7.13
+JUNIT_VER = 4.10
+GETTEXT_VER = 0.19.8.1
+LZ4_VER = 1.7.5
+
+# Distfile (tarball) names for the dependencies fetched over HTTP/FTP.
+BDB_DIST = db-$(BDB_VER).tar.gz
+APR_ICONV_DIST = apr-iconv-$(APR_ICONV_VER).tar.gz
+GNU_ICONV_DIST = libiconv-$(GNU_ICONV_VER).tar.gz
+NEON_DIST = neon-$(NEON_VER).tar.gz
+SQLITE_DIST = sqlite-autoconf-$(SQLITE_VER).tar.gz
+CYRUS_SASL_DIST = cyrus-sasl-$(CYRUS_SASL_VER).tar.gz
+HTTPD_DIST = httpd-$(HTTPD_VER).tar.gz
+LIBMAGIC_DIST = file-$(LIBMAGIC_VER).tar.gz
+RUBY_DIST = ruby-$(RUBY_VER).tar.gz
+BZ2_DIST = bzip2-$(BZ2_VER).tar.gz
+PYTHON_DIST = Python-$(PYTHON_VER).tgz
+JUNIT_DIST = junit-${JUNIT_VER}.jar
+GETTEXT_DIST = gettext-$(GETTEXT_VER).tar.gz
+LZ4_DIST = lz4-$(LZ4_VER).tar.gz
+
+# Expected SHA256 checksums, keyed by distfile name.
+SHA256_${BDB_DIST} = f14fd96dd38915a1d63dcb94a63fbb8092334ceba6b5060760427096f631263e
+SHA256_${APR_ICONV_DIST} = 19381959d50c4a5f3b9c84d594a5f9ffb3809786919b3058281f4c87e1f4b245
+SHA256_${GNU_ICONV_DIST} = ccf536620a45458d26ba83887a983b96827001e92a13847b45e4925cc8913178
+SHA256_${HTTPD_DIST} = b6e1528779f99c301d6438d89ae892a311619b43a39f16297f9eabd4a8d16cb8
+SHA256_${NEON_DIST} = db0bd8cdec329b48f53a6f00199c92d5ba40b0f015b153718d1b15d3d967fbca
+SHA256_${CYRUS_SASL_DIST} = 418c16e6240a4f9b637cbe3d62937b9675627bad27c622191d47de8686fe24fe
+SHA256_${SQLITE_DIST} = 65cc0c3e9366f50c0679c5ccd31432cea894bc4a3e8947dabab88c8693263615
+SHA256_${LIBMAGIC_DIST} = 694c2432e5240187524c9e7cf1ec6acc77b47a0e19554d34c14773e43dbbf214
+SHA256_${RUBY_DIST} = 93b9e75e00b262bc4def6b26b7ae8717efc252c47154abb7392e54357e6c8c9c
+SHA256_${BZ2_DIST} = a2848f34fcd5d6cf47def00461fcb528a0484d8edef8208d6d2e2909dc61d9cd
+SHA256_${PYTHON_DIST} = a4f05a0720ce0fd92626f0278b6b433eee9a6173ddf2bced7957dfb599a5ece1
+SHA256_${JUNIT_DIST} = 36a747ca1e0b86f6ea88055b8723bb87030d627766da6288bf077afdeeb0f75a
+SHA256_${GETTEXT_DIST} = ff942af0e438ced4a8b0ea4b0b6e0d6d657157c5e2364de57baa279c1c125c43
+SHA256_${LZ4_DIST} = 0190cacd63022ccb86f44fa5041dc6c3804407ad61550ca21c382827319e7e7e
+
+# do_check_sha256(distfile): verify the named distfile in $(DISTDIR) against
+# SHA256_<distfile> above, using whichever of sha256(1) (BSD) or
+# sha256sum(1) (GNU) exists; fail the build if neither tool is present.
+define do_check_sha256
+if [ -x /bin/sha256 ]; then \
+ (cd $(DISTDIR) && \
+ echo "SHA256 (${1}) = ${SHA256_${1}}" | /bin/sha256 -C /dev/stdin "${1}"); \
+elif [ -x /usr/bin/sha256sum ]; then \
+ (cd $(DISTDIR) && \
+ echo "${SHA256_${1}} ${1}" | /usr/bin/sha256sum --quiet --check); \
+else \
+ echo "Error: No tool found to verify checksum"; \
+ false; \
+fi
+endef
+
+# NOTE(review): $(SERF_DIST) is not defined anywhere above -- serf is
+# retrieved via 'svn export' from SERF_URL below, not from a tarball.
+# Confirm SERF_DIST is defined later in this Makefile, or this entry
+# expands to just "$(DISTDIR)/".
+DISTFILES = $(DISTDIR)/$(NEON_DIST) \
+ $(DISTDIR)/$(SERF_DIST) \
+ $(DISTDIR)/$(SQLITE_DIST) \
+ $(DISTDIR)/$(HTTPD_DIST) \
+ $(DISTDIR)/$(APR_ICONV_DIST) \
+ $(DISTDIR)/$(GNU_ICONV_DIST) \
+ $(DISTDIR)/$(CYRUS_SASL_DIST) \
+ $(DISTDIR)/$(LIBMAGIC_DIST) \
+ $(DISTDIR)/$(RUBY_DIST) \
+ $(DISTDIR)/$(BZ2_DIST) \
+ $(DISTDIR)/$(PYTHON_DIST) \
+ $(DISTDIR)/$(JUNIT_DIST) \
+ $(DISTDIR)/$(GETTEXT_DIST)
+
+FETCH_CMD = wget -c
+
+# Download locations. Several dependencies (APR, APR-util, serf) are
+# exported from version control rather than fetched as tarballs.
+SUBVERSION_REPOS_URL = https://svn.apache.org/repos/asf/subversion
+BDB_URL = http://download.oracle.com/berkeley-db/$(BDB_DIST)
+APR_URL = https://svn.apache.org/repos/asf/apr/apr
+APR_ICONV_URL = https://www.apache.org/dist/apr/$(APR_ICONV_DIST)
+GNU_ICONV_URL = https://ftp.gnu.org/pub/gnu/libiconv/$(GNU_ICONV_DIST)
+APR_UTIL_URL = https://svn.apache.org/repos/asf/apr/apr-util
+HTTPD_URL = https://archive.apache.org/dist/httpd/$(HTTPD_DIST)
+NEON_URL = http://webdav.org/neon/$(NEON_DIST)
+SERF_URL = https://svn.apache.org/repos/asf/serf/tags/$(SERF_VER)
+SERF_OLD_URL = https://svn.apache.org/repos/asf/serf/tags/$(SERF_OLD_VER)
+SQLITE_URL = https://www.sqlite.org/2017/$(SQLITE_DIST)
+CYRUS_SASL_URL = ftp://ftp.andrew.cmu.edu/pub/cyrus-mail/$(CYRUS_SASL_DIST)
+LIBMAGIC_URL = ftp://ftp.astron.com/pub/file/$(LIBMAGIC_DIST)
+RUBY_URL = https://cache.ruby-lang.org/pub/ruby/2.4/$(RUBY_DIST)
+BZ2_URL = http://bzip.org/$(BZ2_VER)/$(BZ2_DIST)
+PYTHON_URL = https://python.org/ftp/python/$(PYTHON_VER)/$(PYTHON_DIST)
+JUNIT_URL = https://downloads.sourceforge.net/project/junit/junit/$(JUNIT_VER)/$(JUNIT_DIST)
+GETTEXT_URL = https://ftp.gnu.org/pub/gnu/gettext/$(GETTEXT_DIST)
+LZ4_URL = https://github.com/lz4/lz4/archive/v$(LZ4_VER).tar.gz
+
+
+# Unpacked source directories under $(SRCDIR).
+BDB_SRCDIR = $(SRCDIR)/db-$(BDB_VER)
+APR_SRCDIR = $(SRCDIR)/apr-$(APR_VER)
+APR_ICONV_SRCDIR = $(SRCDIR)/apr-iconv-$(APR_ICONV_VER)
+GNU_ICONV_SRCDIR = $(SRCDIR)/libiconv-$(GNU_ICONV_VER)
+APR_UTIL_SRCDIR = $(SRCDIR)/apr-util-$(APR_UTIL_VER)
+HTTPD_SRCDIR = $(SRCDIR)/httpd-$(HTTPD_VER)
+NEON_SRCDIR = $(SRCDIR)/neon-$(NEON_VER)
+SERF_SRCDIR = $(SRCDIR)/serf-$(SERF_VER)
+SERF_OLD_SRCDIR = $(SRCDIR)/serf-$(SERF_OLD_VER)
+SQLITE_SRCDIR = $(SRCDIR)/sqlite-autoconf-$(SQLITE_VER)
+CYRUS_SASL_SRCDIR = $(SRCDIR)/cyrus-sasl-$(CYRUS_SASL_VER)
+LIBMAGIC_SRCDIR = $(SRCDIR)/file-$(LIBMAGIC_VER)
+RUBY_SRCDIR = $(SRCDIR)/ruby-$(RUBY_VER)
+BZ2_SRCDIR = $(SRCDIR)/bzip2-$(BZ2_VER)
+PYTHON_SRCDIR = $(SRCDIR)/Python-$(PYTHON_VER)
+GETTEXT_SRCDIR = $(SRCDIR)/gettext-$(GETTEXT_VER)
+LZ4_SRCDIR = ${SRCDIR}/lz4-$(LZ4_VER)
+SVN_SRCDIR = $(SVN_WC)
+
+# Per-dependency build directories under $(OBJDIR); each holds the
+# .retrieved/.configured/.compiled/.installed stamp files.
+BDB_OBJDIR = $(OBJDIR)/db-$(BDB_VER)
+APR_OBJDIR = $(OBJDIR)/apr-$(APR_VER)
+APR_ICONV_OBJDIR = $(OBJDIR)/apr-iconv-$(APR_ICONV_VER)
+GNU_ICONV_OBJDIR = $(OBJDIR)/libiconv-$(GNU_ICONV_VER)
+APR_UTIL_OBJDIR = $(OBJDIR)/apr-util-$(APR_UTIL_VER)
+HTTPD_OBJDIR = $(OBJDIR)/httpd-$(HTTPD_VER)
+NEON_OBJDIR = $(OBJDIR)/neon-$(NEON_VER)
+SERF_OBJDIR = $(OBJDIR)/serf-$(SERF_VER)
+SERF_OLD_OBJDIR = $(OBJDIR)/serf-$(SERF_OLD_VER)
+SQLITE_OBJDIR = $(OBJDIR)/sqlite-$(SQLITE_VER)
+CYRUS_SASL_OBJDIR = $(OBJDIR)/cyrus-sasl-$(CYRUS_SASL_VER)
+LIBMAGIC_OBJDIR = $(OBJDIR)/file-$(LIBMAGIC_VER)
+RUBY_OBJDIR = $(OBJDIR)/ruby-$(RUBY_VER)
+BZ2_OBJDIR = $(OBJDIR)/bzip2-$(BZ2_VER)
+PYTHON_OBJDIR = $(OBJDIR)/python-$(PYTHON_VER)
+GETTEXT_OBJDIR = $(OBJDIR)/gettext-$(GETTEXT_VER)
+LZ4_OBJDIR = ${OBJDIR}/lz4-$(LZ4_VER)
+SVN_OBJDIR = $(OBJDIR)/$(SVN_REL_WC)
+
+# Tweak this for out-of-tree builds. Note that running individual
+# tests in the test suite won't work conveniently with out-of-tree
+# builds!
+svn_builddir ?=$(SVN_WC)
+
+ifdef PROFILE
+PROFILE_CFLAGS=-pg
+endif
+
+# We need this to make sure some targets below pick up the right libraries
+LD_LIBRARY_PATH=$(PREFIX)/apr/lib:$(PREFIX)/gettext/lib:$(PREFIX)/iconv/lib:$(PREFIX)/bdb/lib:$(PREFIX)/neon/lib:$(PREFIX)/serf/lib:$(PREFIX)/sqlite/lib:$(PREFIX)/cyrus-sasl/lib:$(PREFIX)/iconv/lib:$(PREFIX)/libmagic/lib:$(PREFIX)/ruby/lib:$(PREFIX)/python/lib:$(PREFIX)/svn-$(WC)/lib
+
+#######################################################################
+# Main targets.
+#######################################################################
+
+.PHONY: all reset clean nuke fetch
+
+# Build and install everything, in dependency order.
+all: dirs-create bdb-install apr-install iconv-install apr-util-install \
+	httpd-install neon-install serf-install serf-old-install \
+	sqlite-install cyrus-sasl-install libmagic-install \
+	ruby-install bz2-install python-install gettext-install \
+	lz4-install svn-install svn-bindings-install
+
+# Use these to start a build from the beginning.
+reset: dirs-reset bdb-reset apr-reset iconv-reset apr-util-reset \
+	httpd-reset neon-reset serf-reset serf-old-reset sqlite-reset \
+	cyrus-sasl-reset libmagic-reset ruby-reset python-reset \
+	bz2-reset gettext-reset lz4-reset svn-reset
+
+# Use to save disk space.
+clean: bdb-clean apr-clean iconv-clean apr-util-clean httpd-clean \
+	neon-clean serf-clean serf-old-clean sqlite-clean cyrus-sasl-clean \
+	libmagic-clean ruby-clean bz2-clean python-clean gettext-clean \
+	lz4-clean svn-clean
+
+# Nukes everything (including installed binaries!)
+# Use this to start ALL OVER AGAIN! Use with caution!
+# Asks for interactive confirmation before removing anything.
+nuke:
+	@echo
+	@echo "I will now remove the following directories PERMANENTLY:"
+	@echo
+	@echo "  $(SRCDIR)"
+	@echo "  $(OBJDIR)"
+	@echo "  $(PREFIX)"
+	@echo
+	@echo -n 'Do you want me to continue? ([no]/yes): '
+	@read ANSWER ; \
+	case $$ANSWER in \
+	yes) echo "You said $$ANSWER. I will continue."; \
+	echo rm -rf $(SRCDIR) $(OBJDIR) $(PREFIX); \
+	rm -rf $(SRCDIR) $(OBJDIR) $(PREFIX); \
+	$(MAKE) reset; \
+	;; \
+	"") echo "You said no."; \
+	;; \
+	*) echo "You said $$ANSWER."; \
+	;; \
+	esac
+
+# Download all tarball-based dependencies.
+fetch: $(DISTFILES)
+
+#######################################################################
+# directories
+#######################################################################
+
+dirs-create: $(PWD)/.dirs-created
+dirs-reset:
+	rm -f $(PWD)/.dirs-created
+
+# Stamp file guarding creation of the top-level working directories.
+$(PWD)/.dirs-created:
+	$(foreach d, $(PREFIX) $(DISTDIR) $(SRCDIR) $(OBJDIR), \
+		[ -d $(d) ] || mkdir -p $(d);)
+	touch $@
+
+#######################################################################
+# bdb
+#######################################################################
+
+# Convenience aliases for the per-step stamp files.
+bdb-retrieve:	$(BDB_OBJDIR)/.retrieved
+bdb-configure:	$(BDB_OBJDIR)/.configured
+bdb-compile:	$(BDB_OBJDIR)/.compiled
+bdb-install:	$(BDB_OBJDIR)/.installed
+bdb-reset:
+	$(foreach f, .retrieved .configured .compiled .installed, \
+		rm -f $(BDB_OBJDIR)/$(f);)
+
+# MAKEFLAGS is cleared so the sub-make does not inherit -j/-k options
+# from this make invocation (same pattern throughout this Makefile).
+bdb-clean:
+	-(cd $(BDB_SRCDIR)/build_unix/ && env MAKEFLAGS= make clean)
+
+# fetch distfile for bdb
+$(DISTDIR)/$(BDB_DIST):
+	cd $(DISTDIR) && $(FETCH_CMD) $(BDB_URL)
+
+# retrieve bdb
+$(BDB_OBJDIR)/.retrieved: $(DISTDIR)/$(BDB_DIST)
+	$(call do_check_sha256,$(BDB_DIST))
+	[ -d $(BDB_OBJDIR) ] || mkdir -p $(BDB_OBJDIR)
+	tar -C $(SRCDIR) -zxf $(DISTDIR)/$(BDB_DIST)
+	touch $@
+
+# configure bdb
+$(BDB_OBJDIR)/.configured: $(BDB_OBJDIR)/.retrieved
+	cd $(BDB_SRCDIR)/build_unix \
+		&& env CFLAGS="-g $(PROFILE_CFLAGS)" GREP="`which grep`" \
+			../dist/configure \
+			--prefix=$(PREFIX)/bdb \
+			--enable-debug
+	touch $@
+
+# compile bdb
+$(BDB_OBJDIR)/.compiled: $(BDB_OBJDIR)/.configured
+	(cd $(BDB_SRCDIR)/build_unix && env MAKEFLAGS= make)
+	touch $@
+
+# install bdb
+$(BDB_OBJDIR)/.installed: $(BDB_OBJDIR)/.compiled
+	(cd $(BDB_SRCDIR)/build_unix && env MAKEFLAGS= make install)
+	touch $@
+
+#######################################################################
+# apr
+#######################################################################
+
+apr-retrieve:	$(APR_OBJDIR)/.retrieved
+apr-configure:	$(APR_OBJDIR)/.configured
+apr-compile:	$(APR_OBJDIR)/.compiled
+apr-install:	$(APR_OBJDIR)/.installed
+apr-reset:
+	$(foreach f, .retrieved .configured .compiled .installed, \
+		rm -f $(APR_OBJDIR)/$(f);)
+
+apr-clean:
+	-(cd $(APR_OBJDIR) && env MAKEFLAGS= make clean)
+
+# retrieve apr if not present yet
+# (no distfile: apr is exported from the ASF repository at APR_URL)
+$(APR_OBJDIR)/.retrieved:
+	[ -d $(APR_OBJDIR) ] || mkdir -p $(APR_OBJDIR)
+	if [ ! -d $(APR_SRCDIR) ]; then \
+		svn export $(APR_URL)/tags/$(APR_VER)/ $(APR_SRCDIR); \
+	fi
+	touch $@
+
+ifeq ($(THREADING),yes)
+THREADS_FLAG=--enable-threads
+else
+THREADS_FLAG=--disable-threads
+endif
+
+ifdef POOL_DEBUG
+POOL_DEBUG_FLAG=--enable-pool-debug=all
+else
+# Map apr_palloc()/apr_pool_{clear,destroy}() to malloc()/free().
+# This also puts poison bytes into freed memory to help detect use after free.
+POOL_DEBUG_FLAG=--enable-pool-debug=yes
+endif
+
+# configure apr
+$(APR_OBJDIR)/.configured: $(APR_OBJDIR)/.retrieved $(BDB_OBJDIR)/.installed
+	cd $(APR_SRCDIR) && ./buildconf
+	cd $(APR_OBJDIR) \
+		&& env CFLAGS="-O0 -g $(PROFILE_CFLAGS)" GREP="`which grep`" \
+			$(APR_SRCDIR)/configure \
+			--prefix=$(PREFIX)/apr \
+			--enable-maintainer-mode \
+			$(THREADS_FLAG) \
+			$(POOL_DEBUG_FLAG)
+	touch $@
+
+# compile apr
+$(APR_OBJDIR)/.compiled: $(APR_OBJDIR)/.configured
+	(cd $(APR_OBJDIR) && env MAKEFLAGS= make)
+	touch $@
+
+# install apr
+$(APR_OBJDIR)/.installed: $(APR_OBJDIR)/.compiled
+	(cd $(APR_OBJDIR) && env MAKEFLAGS= make install)
+	touch $@
+
+#######################################################################
+# apr-iconv
+#######################################################################
+
+apr-iconv-retrieve:	$(APR_ICONV_OBJDIR)/.retrieved
+apr-iconv-configure:	$(APR_ICONV_OBJDIR)/.configured
+apr-iconv-compile:	$(APR_ICONV_OBJDIR)/.compiled
+apr-iconv-install:	$(APR_ICONV_OBJDIR)/.installed
+apr-iconv-reset:
+	$(foreach f, .retrieved .configured .compiled .installed, \
+		rm -f $(APR_ICONV_OBJDIR)/$(f);)
+
+apr-iconv-clean:
+	-(cd $(APR_ICONV_OBJDIR) && env MAKEFLAGS= make clean)
+
+# fetch distfile for apr-iconv
+$(DISTDIR)/$(APR_ICONV_DIST):
+	cd $(DISTDIR) && $(FETCH_CMD) $(APR_ICONV_URL)
+
+# retrieve apr-iconv
+$(APR_ICONV_OBJDIR)/.retrieved: $(DISTDIR)/$(APR_ICONV_DIST)
+	$(call do_check_sha256,$(APR_ICONV_DIST))
+	[ -d $(APR_ICONV_OBJDIR) ] || mkdir -p $(APR_ICONV_OBJDIR)
+	tar -C $(SRCDIR) -zxf $(DISTDIR)/$(APR_ICONV_DIST)
+	touch $@
+
+# configure apr-iconv
+$(APR_ICONV_OBJDIR)/.configured: $(APR_ICONV_OBJDIR)/.retrieved \
+	$(APR_OBJDIR)/.installed
+	cd $(APR_ICONV_OBJDIR) \
+		&& env CFLAGS="-g $(PROFILE_CFLAGS) -DAPR_POOL_DEBUG" \
+			GREP="`which grep`" \
+			$(APR_ICONV_SRCDIR)/configure \
+			--prefix=$(PREFIX)/apr \
+			--with-apr=$(PREFIX)/apr
+	touch $@
+
+# compile apr-iconv
+$(APR_ICONV_OBJDIR)/.compiled: $(APR_ICONV_OBJDIR)/.configured
+	(cd $(APR_ICONV_OBJDIR) \
+		&& env MAKEFLAGS= make CPPFLAGS="-D_OSD_POSIX" CFLAGS="-g -O0 $(PROFILE_CFLAGS)")
+	touch $@
+
+# install apr-iconv
+$(APR_ICONV_OBJDIR)/.installed: $(APR_ICONV_OBJDIR)/.compiled
+	(cd $(APR_ICONV_OBJDIR) && env MAKEFLAGS= make install)
+	touch $@
+
+#######################################################################
+# gnu-iconv
+#######################################################################
+
+gnu-iconv-retrieve:	$(GNU_ICONV_OBJDIR)/.retrieved
+gnu-iconv-configure:	$(GNU_ICONV_OBJDIR)/.configured
+gnu-iconv-compile:	$(GNU_ICONV_OBJDIR)/.compiled
+gnu-iconv-install:	$(GNU_ICONV_OBJDIR)/.installed
+gnu-iconv-reset:
+	$(foreach f, .retrieved .configured .compiled .installed, \
+		rm -f $(GNU_ICONV_OBJDIR)/$(f);)
+
+gnu-iconv-clean:
+	-(cd $(GNU_ICONV_OBJDIR) && env MAKEFLAGS= make clean)
+	rm -f $(GNU_ICONV_OBJDIR)/lib_encodings.def.diff
+	rm -f $(GNU_ICONV_OBJDIR)/lib_aliases.gperf.diff
+
+# fetch distfile for gnu-iconv
+$(DISTDIR)/$(GNU_ICONV_DIST):
+	cd $(DISTDIR) && $(FETCH_CMD) $(GNU_ICONV_URL)
+
+# Generate (via echo) the patch adding "646" as an encoding name.
+$(GNU_ICONV_OBJDIR)/lib_encodings.def.diff:
+	mkdir -p $(dir $@)
+	echo > $@.tmp '--- lib/encodings.def.orig	Wed Oct 24 23:41:41 2007'
+	echo >>$@.tmp '+++ lib/encodings.def	Wed Oct 24 23:43:47 2007'
+	echo >>$@.tmp '@@ -37,6 +37,7 @@'
+	echo >>$@.tmp ' '
+	echo >>$@.tmp ' '
+	echo >>$@.tmp ' DEFENCODING(( "US-ASCII",                /* IANA */'
+	echo >>$@.tmp '+              "646",'
+	echo >>$@.tmp '               "ASCII",                  /* IANA, JDK 1.1 */'
+	echo >>$@.tmp '               "ISO646-US",              /* IANA */'
+	echo >>$@.tmp '               "ISO_646.IRV:1991",       /* IANA */'
+	mv -f $@.tmp $@
+
+# Generate the matching patch for the gperf alias table.
+$(GNU_ICONV_OBJDIR)/lib_aliases.gperf.diff:
+	mkdir -p $(dir $@)
+	echo > $@.tmp '--- lib/aliases.gperf.orig	Wed Oct 24 23:41:32 2007'
+	echo >>$@.tmp '+++ lib/aliases.gperf	Wed Oct 24 23:47:38 2007'
+	echo >>$@.tmp '@@ -10,6 +10,7 @@ struct alias { int name; unsigned int encoding_index; '
+	echo >>$@.tmp ' %pic'
+	echo >>$@.tmp ' %%'
+	echo >>$@.tmp ' US-ASCII, ei_ascii'
+	echo >>$@.tmp '+646, ei_ascii'
+	echo >>$@.tmp ' ASCII, ei_ascii'
+	echo >>$@.tmp ' ISO646-US, ei_ascii'
+	echo >>$@.tmp ' ISO_646.IRV:1991, ei_ascii'
+	mv -f $@.tmp $@
+
+# retrieve gnu-iconv
+# Add 646 as an alias for ASCII to fix prop_test 22 on OpenBSD
+$(GNU_ICONV_OBJDIR)/.retrieved: $(DISTDIR)/$(GNU_ICONV_DIST) \
+	$(GNU_ICONV_OBJDIR)/lib_encodings.def.diff \
+	$(GNU_ICONV_OBJDIR)/lib_aliases.gperf.diff
+	$(call do_check_sha256,$(GNU_ICONV_DIST))
+	tar -C $(SRCDIR) -zxf $(DISTDIR)/$(GNU_ICONV_DIST)
+	cd $(SRCDIR)/libiconv-$(GNU_ICONV_VER) && \
+		patch -p0 < $(GNU_ICONV_OBJDIR)/lib_encodings.def.diff && \
+		patch -p0 < $(GNU_ICONV_OBJDIR)/lib_aliases.gperf.diff
+	touch $@
+
+# configure gnu-iconv
+# (aliases.h must be regenerated first because the patch above changed
+# the gperf input)
+$(GNU_ICONV_OBJDIR)/.configured: $(GNU_ICONV_OBJDIR)/.retrieved
+	cd $(SRCDIR)/libiconv-${GNU_ICONV_VER} && \
+		${MAKE} -f Makefile.devel lib/aliases.h
+	cd $(GNU_ICONV_OBJDIR) \
+		&& env CFLAGS="-g $(PROFILE_CFLAGS)" GREP="`which grep`"\
+		$(GNU_ICONV_SRCDIR)/configure \
+		--prefix=$(PREFIX)/iconv \
+		--enable-extra-encodings
+	touch $@
+
+# compile gnu-iconv
+$(GNU_ICONV_OBJDIR)/.compiled: $(GNU_ICONV_OBJDIR)/.configured
+	(cd $(GNU_ICONV_OBJDIR) && env MAKEFLAGS= make)
+	touch $@
+
+# install gnu-iconv
+$(GNU_ICONV_OBJDIR)/.installed: $(GNU_ICONV_OBJDIR)/.compiled
+	(cd $(GNU_ICONV_OBJDIR) && env MAKEFLAGS= make install)
+	touch $@
+
+#######################################################################
+# iconv
+#######################################################################
+
+.PHONY: iconv-install iconv-reset iconv-clean
+
+# Dispatch the generic iconv-* targets to APR iconv or GNU iconv,
+# depending on the USE_APR_ICONV knob at the top of this Makefile.
+ifeq ($(USE_APR_ICONV),yes)
+iconv-install: apr-iconv-install
+iconv-reset: apr-iconv-reset
+iconv-clean: apr-iconv-clean
+else
+iconv-install: gnu-iconv-install
+iconv-reset: gnu-iconv-reset
+iconv-clean: gnu-iconv-clean
+endif
+
+#######################################################################
+# apr-util
+#######################################################################
+
+apr-util-retrieve:	$(APR_UTIL_OBJDIR)/.retrieved
+apr-util-configure:	$(APR_UTIL_OBJDIR)/.configured
+apr-util-compile:	$(APR_UTIL_OBJDIR)/.compiled
+apr-util-install:	$(APR_UTIL_OBJDIR)/.installed
+apr-util-reset:
+	$(foreach f, .retrieved .configured .compiled .installed, \
+		rm -f $(APR_UTIL_OBJDIR)/$(f);)
+
+apr-util-clean:
+	-(cd $(APR_UTIL_OBJDIR) && env MAKEFLAGS= make clean)
+
+
+# retrieve apr-util if not present yet
+# (no distfile: apr-util is exported from the ASF repository)
+$(APR_UTIL_OBJDIR)/.retrieved:
+	[ -d $(APR_UTIL_OBJDIR) ] || mkdir -p $(APR_UTIL_OBJDIR)
+	if [ ! -d $(APR_UTIL_SRCDIR) ]; then \
+		svn export $(APR_UTIL_URL)/tags/$(APR_UTIL_VER)/ \
+			$(APR_UTIL_SRCDIR); \
+	fi
+	touch $@
+
+# Select which iconv implementation apr-util links against.
+ifeq ($(USE_APR_ICONV),yes)
+ICONV_FLAG=--with-iconv=$(PREFIX)/apr
+ICONV_OBJDIR=$(APR_ICONV_OBJDIR)
+else
+ICONV_FLAG=--with-iconv=$(PREFIX)/iconv
+ICONV_OBJDIR=$(GNU_ICONV_OBJDIR)
+endif
+
+# configure apr-util
+$(APR_UTIL_OBJDIR)/.configured: $(APR_UTIL_OBJDIR)/.retrieved \
+	$(APR_OBJDIR)/.installed $(ICONV_OBJDIR)/.installed
+	cd $(APR_UTIL_SRCDIR) && ./buildconf --with-apr=$(APR_SRCDIR)
+	cd $(APR_UTIL_OBJDIR) \
+		&& env LD_LIBRARY_PATH=$(PREFIX)/bdb/lib \
+			CFLAGS="-O0 -g $(PROFILE_CFLAGS) -DAPR_POOL_DEBUG" \
+			GREP="`which grep`" \
+			$(APR_UTIL_SRCDIR)/configure \
+			--prefix=$(PREFIX)/apr \
+			--enable-maintainer-mode \
+			--with-apr=$(PREFIX)/apr \
+			--with-berkeley-db=$(PREFIX)/bdb \
+			$(ICONV_FLAG)
+	touch $@
+
+# compile apr-util
+$(APR_UTIL_OBJDIR)/.compiled: $(APR_UTIL_OBJDIR)/.configured
+	(cd $(APR_UTIL_OBJDIR) && env MAKEFLAGS= make)
+	touch $@
+
+# install apr-util
+$(APR_UTIL_OBJDIR)/.installed: $(APR_UTIL_OBJDIR)/.compiled
+	(cd $(APR_UTIL_OBJDIR) && env MAKEFLAGS= make install)
+	touch $@
+
+#######################################################################
+# httpd
+#######################################################################
+
+HTTPD_CONF=	$(PREFIX)/httpd/conf/httpd-$(SVN_REL_WC).conf
+httpd-retrieve:	$(HTTPD_OBJDIR)/.retrieved
+httpd-configure:	$(HTTPD_OBJDIR)/.configured
+httpd-compile:	$(HTTPD_OBJDIR)/.compiled
+httpd-install:	$(HTTPD_OBJDIR)/.installed $(HTTPD_CONF)
+httpd-reset:
+	$(foreach f, .retrieved .configured .compiled .installed, \
+		rm -f $(HTTPD_OBJDIR)/$(f);)
+
+httpd-clean:
+	-(cd $(HTTPD_OBJDIR) && env MAKEFLAGS= make clean)
+	-rm ${HTTPD_OBJDIR}/chil-engine.diff
+	-rm ${HTTPD_OBJDIR}/ssl-set-state.diff
+	-rm ${HTTPD_OBJDIR}/acinclude.diff
+
+# fetch distfile for httpd
+$(DISTDIR)/$(HTTPD_DIST):
+	cd $(DISTDIR) && $(FETCH_CMD) $(HTTPD_URL)
+
+# Generated patch: guard the CHIL crypto-engine fork check so httpd 2.2
+# builds against OpenSSL versions that lack ENGINE_CTRL_CHIL_SET_FORKCHECK.
+$(HTTPD_OBJDIR)/chil-engine.diff:
+	mkdir -p $(dir $@)
+	echo > $@.tmp '--- modules/ssl/ssl_engine_init.c.orig	Mon Apr 14 13:20:57 2014'
+	echo >>$@.tmp '+++ modules/ssl/ssl_engine_init.c	Mon Apr 14 13:21:22 2014'
+	echo >>$@.tmp '@@ -406,9 +406,11 @@ void ssl_init_Engine(server_rec *s, apr_pool_t *p)'
+	echo >>$@.tmp '             ssl_die();'
+	echo >>$@.tmp '         }'
+	echo >>$@.tmp ' '
+	echo >>$@.tmp '+#ifdef ENGINE_CTRL_CHIL_SET_FORKCHECK'
+	echo >>$@.tmp '         if (strEQ(mc->szCryptoDevice, "chil")) {'
+	echo >>$@.tmp '             ENGINE_ctrl(e, ENGINE_CTRL_CHIL_SET_FORKCHECK, 1, 0, 0);'
+	echo >>$@.tmp '         }'
+	echo >>$@.tmp '+#endif'
+	echo >>$@.tmp ' '
+	echo >>$@.tmp '         if (!ENGINE_set_default(e, ENGINE_METHOD_ALL)) {'
+	echo >>$@.tmp '             ap_log_error(APLOG_MARK, APLOG_ERR, 0, s,'
+	mv -f $@.tmp $@
+
+# Generated patch: only define SSL_set_state() if the toolkit lacks it.
+$(HTTPD_OBJDIR)/ssl-set-state.diff:
+	mkdir -p $(dir $@)
+	echo > $@.tmp '--- modules/ssl/ssl_toolkit_compat.h.orig	Fri Feb  3 10:47:33 2017'
+	echo >>$@.tmp '+++ modules/ssl/ssl_toolkit_compat.h	Fri Feb  3 10:52:17 2017'
+	echo >>$@.tmp '@@ -84,7 +84,9 @@'
+	echo >>$@.tmp ' #define SSL_get_state(ssl) SSL_state(ssl)'
+	echo >>$@.tmp ' #endif'
+	echo >>$@.tmp ' '
+	echo >>$@.tmp '+#ifndef HAVE_SSL_SET_STATE'
+	echo >>$@.tmp ' #define SSL_set_state(ssl,val) (ssl)->state = val'
+	echo >>$@.tmp '+#endif'
+	echo >>$@.tmp ' '
+	echo >>$@.tmp ' #define MODSSL_BIO_CB_ARG_TYPE const char'
+	echo >>$@.tmp ' #define MODSSL_CRYPTO_CB_ARG_TYPE const char'
+	mv -f $@.tmp $@
+
+# Generated patch: have configure detect SSL_set_state() for OpenSSL too.
+$(HTTPD_OBJDIR)/acinclude.diff:
+	mkdir -p $(dir $@)
+	echo >$@.tmp '--- acinclude.m4.orig	Fri Feb  3 11:05:08 2017'
+	echo >>$@.tmp '+++ acinclude.m4	Fri Feb  3 11:05:15 2017'
+	echo >>$@.tmp '@@ -455,6 +455,7 @@'
+	echo >>$@.tmp '     AC_CHECK_HEADERS([openssl/engine.h])'
+	echo >>$@.tmp '     AC_CHECK_FUNCS([SSLeay_version SSL_CTX_new], [], [liberrors="yes"])'
+	echo >>$@.tmp '     AC_CHECK_FUNCS([ENGINE_init ENGINE_load_builtin_engines])'
+	echo >>$@.tmp '+    AC_CHECK_FUNCS(SSL_set_state)'
+	echo >>$@.tmp '   else'
+	echo >>$@.tmp '     AC_CHECK_FUNCS([SSLC_library_version SSL_CTX_new], [], [liberrors="yes"])'
+	echo >>$@.tmp '     AC_CHECK_FUNCS(SSL_set_state)'
+	mv -f $@.tmp $@
+
+# retrieve httpd
+# Unpack, apply the generated patches above, and sed-edit the mod_ssl
+# sources for compatibility with newer OpenSSL (drop RAND_egd, guard
+# compression support, rename SSL_CTX_use_certificate_chain).
+$(HTTPD_OBJDIR)/.retrieved: $(DISTDIR)/$(HTTPD_DIST) \
+	$(HTTPD_OBJDIR)/chil-engine.diff $(HTTPD_OBJDIR)/ssl-set-state.diff \
+	$(HTTPD_OBJDIR)/acinclude.diff
+	$(call do_check_sha256,$(HTTPD_DIST))
+	[ -d $(HTTPD_OBJDIR) ] || mkdir -p $(HTTPD_OBJDIR)
+	tar -C $(SRCDIR) -zxf $(DISTDIR)/$(HTTPD_DIST)
+	cd $(HTTPD_SRCDIR) && patch -p0 < $(HTTPD_OBJDIR)/chil-engine.diff
+	cd $(HTTPD_SRCDIR) && patch -p0 < $(HTTPD_OBJDIR)/ssl-set-state.diff
+	cd $(HTTPD_SRCDIR) && patch -p0 < $(HTTPD_OBJDIR)/acinclude.diff
+	cp $(HTTPD_SRCDIR)/modules/ssl/ssl_toolkit_compat.h \
+		$(HTTPD_SRCDIR)/modules/ssl/ssl_toolkit_compat.h.orig
+	sed '/^#define HAVE_SSL_RAND_EGD/d' \
+		< $(HTTPD_SRCDIR)/modules/ssl/ssl_toolkit_compat.h.orig \
+		> $(HTTPD_SRCDIR)/modules/ssl/ssl_toolkit_compat.h
+	cp $(HTTPD_SRCDIR)/modules/ssl/ssl_engine_vars.c \
+		$(HTTPD_SRCDIR)/modules/ssl/ssl_engine_vars.c.orig
+	sed 's/^\(#if (OPENSSL_VERSION_NUMBER >= 0x00908000)\)$$/\1 \&\& !defined(OPENSSL_NO_COMP)/' \
+		< $(HTTPD_SRCDIR)/modules/ssl/ssl_engine_vars.c.orig \
+		> $(HTTPD_SRCDIR)/modules/ssl/ssl_engine_vars.c
+	cp $(HTTPD_SRCDIR)/modules/ssl/ssl_engine_init.c \
+		$(HTTPD_SRCDIR)/modules/ssl/ssl_engine_init.c.orig
+	$(foreach f, ssl_engine_init.c ssl_util_ssl.c ssl_util_ssl.h, \
+		cp $(HTTPD_SRCDIR)/modules/ssl/${f} $(HTTPD_SRCDIR)/modules/ssl/${f}.orig; \
+		sed 's/SSL_CTX_use_certificate_chain/_SSL_CTX_use_certificate_chain/' \
+			< $(HTTPD_SRCDIR)/modules/ssl/${f}.orig \
+			> $(HTTPD_SRCDIR)/modules/ssl/${f};\
+	)
+	touch $@
+
+# configure httpd
+$(HTTPD_OBJDIR)/.configured: $(HTTPD_OBJDIR)/.retrieved \
+	$(APR_UTIL_OBJDIR)/.installed
+	cd $(HTTPD_SRCDIR) && ./buildconf
+	cd $(HTTPD_OBJDIR) \
+		&& env CFLAGS="-g $(PROFILE_CFLAGS) -DAPR_POOL_DEBUG" \
+			GREP="`which grep`" \
+			$(HTTPD_SRCDIR)/configure \
+			--prefix=$(PREFIX)/httpd \
+			--enable-maintainer-mode \
+			--enable-ssl \
+			--enable-dav \
+			--enable-proxy \
+			--with-mpm=prefork \
+			--with-apr="$(PREFIX)/apr" \
+			--with-apr-util="$(PREFIX)/apr"
+	touch $@
+
+# compile httpd
+$(HTTPD_OBJDIR)/.compiled: $(HTTPD_OBJDIR)/.configured
+	(cd $(HTTPD_OBJDIR) && env MAKEFLAGS= make)
+	touch $@
+
+# install httpd
+$(HTTPD_OBJDIR)/.installed: $(HTTPD_OBJDIR)/.compiled
+	(cd $(HTTPD_OBJDIR) && env MAKEFLAGS= make install)
+	touch $@
+
+# create a httpd.conf for mod_dav_svn
+# (listens on localhost:8080 only; serves /tmp/svn-sandbox/repos at /repos)
+$(HTTPD_CONF):
+	mkdir -p $(dir $@)
+	echo > $@.tmp '# httpd config for use with mod_dav_svn'
+	echo >>$@.tmp 'ServerRoot "$(PREFIX)/httpd"'
+	echo >>$@.tmp 'Listen localhost:8080'
+	echo >>$@.tmp 'LoadModule dav_svn_module modules/svn-$(WC)/mod_dav_svn.so'
+	echo >>$@.tmp 'LoadModule authz_svn_module modules/svn-$(WC)/mod_authz_svn.so'
+	echo >>$@.tmp 'DocumentRoot "$(PREFIX)/httpd/htdocs"'
+	echo >>$@.tmp '<Directory />'
+	echo >>$@.tmp '    Options FollowSymLinks'
+	echo >>$@.tmp '    AllowOverride None'
+	echo >>$@.tmp '    Order deny,allow'
+	echo >>$@.tmp '    Deny from all'
+	echo >>$@.tmp '</Directory>'
+	echo >>$@.tmp '<Location /repos>'
+	echo >>$@.tmp '  DAV svn'
+	echo >>$@.tmp '  SVNPath /tmp/svn-sandbox/repos'
+	echo >>$@.tmp '  Allow from localhost'
+	echo >>$@.tmp '</Location>'
+	mv -f $@.tmp $@
+
+#######################################################################
+# neon
+#######################################################################
+
+# Convenience phony targets mapping to the neon stamp files; -reset removes
+# the stamps so the steps re-run, -clean asks neon's own Makefile to clean.
+neon-retrieve: $(NEON_OBJDIR)/.retrieved
+neon-configure: $(NEON_OBJDIR)/.configured
+neon-compile: $(NEON_OBJDIR)/.compiled
+neon-install: $(NEON_OBJDIR)/.installed
+neon-reset:
+ $(foreach f, .retrieved .configured .compiled .installed, \
+ rm -f $(NEON_OBJDIR)/$(f);)
+
+neon-clean:
+ -(cd $(NEON_OBJDIR) && env MAKEFLAGS= make clean)
+
+# fetch distfile for neon
+$(DISTDIR)/$(NEON_DIST):
+ cd $(DISTDIR) && $(FETCH_CMD) $(NEON_URL)
+
+# retrieve neon
+$(NEON_OBJDIR)/.retrieved: $(DISTDIR)/$(NEON_DIST)
+ $(call do_check_sha256,$(NEON_DIST))
+ [ -d $(NEON_OBJDIR) ] || mkdir -p $(NEON_OBJDIR)
+ tar -C $(SRCDIR) -zxf $(DISTDIR)/$(NEON_DIST)
+ touch $@
+
+# OpenBSD does not have krb5-config in PATH, but the neon port has
+# a suitable replacement.
+ifeq ($(UNAME),OpenBSD)
+KRB5_CONFIG_PATH=/usr/ports/net/neon/files
+endif
+
+# configure neon
+# If a replacement krb5-config exists, copy it into the objdir and prepend
+# that dir to PATH (passed as a configure variable) so configure finds it.
+$(NEON_OBJDIR)/.configured: $(NEON_OBJDIR)/.retrieved
+ cd $(NEON_SRCDIR) && ./autogen.sh
+ if [ -n "$(KRB5_CONFIG_PATH)" ] && [ -d "$(KRB5_CONFIG_PATH)" ]; then \
+ cp $(KRB5_CONFIG_PATH)/krb5-config $(NEON_OBJDIR); \
+ chmod +x $(NEON_OBJDIR)/krb5-config; \
+ fi
+ cd $(NEON_OBJDIR) \
+ && env CFLAGS="-g $(PROFILE_CFLAGS)" GREP="`which grep`" \
+ $(NEON_SRCDIR)/configure \
+ PATH=$(NEON_OBJDIR):$$PATH \
+ --prefix=$(PREFIX)/neon \
+ --with-ssl \
+ --enable-shared \
+ --without-libproxy
+ touch $@
+
+# compile neon
+$(NEON_OBJDIR)/.compiled: $(NEON_OBJDIR)/.configured
+ (cd $(NEON_OBJDIR) && env MAKEFLAGS= make)
+ touch $@
+
+# install neon
+$(NEON_OBJDIR)/.installed: $(NEON_OBJDIR)/.compiled
+ (cd $(NEON_OBJDIR) && env MAKEFLAGS= make install)
+ touch $@
+
+
+#######################################################################
+# serf
+#######################################################################
+
+# Convenience phony targets for serf; serf builds with scons, not autoconf,
+# so -clean invokes scons -c and there is no real configure step.
+serf-retrieve: $(SERF_OBJDIR)/.retrieved
+serf-configure: $(SERF_OBJDIR)/.configured
+serf-compile: $(SERF_OBJDIR)/.compiled
+serf-install: $(SERF_OBJDIR)/.installed
+serf-reset:
+ $(foreach f, .retrieved .configured .compiled .installed, \
+ rm -f $(SERF_OBJDIR)/$(f);)
+
+serf-clean:
+ -(cd $(SERF_SRCDIR) && scons -c)
+
+
+# fetch distfile for serf
+# (tarball-based retrieval kept for reference; serf is currently checked
+# out from its repository below instead)
+#$(DISTDIR)/$(SERF_DIST):
+# cd $(DISTDIR) && $(FETCH_CMD) $(SERF_URL)
+#
+# retrieve serf
+#$(SERF_OBJDIR)/.retrieved: $(DISTDIR)/$(SERF_DIST)
+# [ -d $(SERF_OBJDIR) ] || mkdir -p $(SERF_OBJDIR)
+# tar -C $(SRCDIR) -zxf $(DISTDIR)/$(SERF_DIST)
+# touch $@
+
+# retrieve serf if not present yet
+# Checks out serf from svn and merges the pinned 1.3.x-sslbuild branch
+# revision into the working copy.
+$(SERF_OBJDIR)/.retrieved:
+ [ -d $(SERF_OBJDIR) ] || mkdir -p $(SERF_OBJDIR)
+ if [ ! -d $(SERF_SRCDIR) ]; then \
+ svn co $(SERF_URL) $(SERF_SRCDIR); \
+ svn merge ^/serf/branches/1.3.x-sslbuild@1781542 $(SERF_SRCDIR); \
+ fi
+ touch $@
+
+# compile serf (serf won't compile outside its source tree)
+$(SERF_OBJDIR)/.compiled: $(SERF_OBJDIR)/.retrieved \
+ $(APR_UTIL_OBJDIR)/.installed
+ cd $(SERF_SRCDIR) && \
+ scons DEBUG=1 \
+ CFLAGS="-O0 -g $(PROFILE_CFLAGS) -DAPR_POOL_DEBUG" \
+ APR=$(PREFIX)/apr \
+ APU=$(PREFIX)/apr \
+ PREFIX=$(PREFIX)/serf
+ touch $@
+
+# install serf
+# scons cannot overwrite an existing install, so wipe the prefix first; the
+# trailing ln -s papers over scons' unportable shared-library naming.
+$(SERF_OBJDIR)/.installed: $(SERF_OBJDIR)/.compiled
+ rm -rf $(PREFIX)/serf # XXX scons cannot reinstall :(
+ cd $(SERF_SRCDIR) && \
+ scons install
+ # work around unportable scons shared lib support
+ -ln -s libserf-1.so.$(shell echo $(SERF_VER) | sed -e 's/[0-9]$$/0/') \
+ $(PREFIX)/serf/lib/libserf-1.so
+ touch $@
+
+#######################################################################
+# serf-old (compatible with Subversion 1.5)
+#######################################################################
+
+# Convenience phony targets for serf-old (a serf version that is still
+# compatible with Subversion 1.5); built with ./serfmake, not scons.
+serf-old-retrieve: $(SERF_OLD_OBJDIR)/.retrieved
+serf-old-configure: $(SERF_OLD_OBJDIR)/.configured
+serf-old-compile: $(SERF_OLD_OBJDIR)/.compiled
+serf-old-install: $(SERF_OLD_OBJDIR)/.installed
+serf-old-reset:
+ $(foreach f, .retrieved .configured .compiled .installed, \
+ rm -f $(SERF_OLD_OBJDIR)/$(f);)
+
+serf-old-clean:
+ -(cd $(SERF_OLD_SRCDIR) && ./serfmake clean)
+
+# retrieve serf-old if not present yet (svn export of a pinned URL)
+$(SERF_OLD_OBJDIR)/.retrieved:
+ [ -d $(SERF_OLD_OBJDIR) ] || mkdir -p $(SERF_OLD_OBJDIR)
+ if [ ! -d $(SERF_OLD_SRCDIR) ]; then \
+ svn export $(SERF_OLD_URL) $(SERF_OLD_SRCDIR); \
+ fi
+ touch $@
+
+# compile serf-old (serf won't compile outside its source tree)
+$(SERF_OLD_OBJDIR)/.compiled: $(SERF_OLD_OBJDIR)/.retrieved \
+ $(APR_UTIL_OBJDIR)/.installed
+ cd $(SERF_OLD_SRCDIR) && \
+ env CFLAGS="-O0 -g $(PROFILE_CFLAGS) -DAPR_POOL_DEBUG" \
+ ./serfmake --with-apr=$(PREFIX)/apr \
+ --prefix=$(PREFIX)/serf-old \
+ build
+ touch $@
+
+# install serf-old
+$(SERF_OLD_OBJDIR)/.installed: $(SERF_OLD_OBJDIR)/.compiled
+ cd $(SERF_OLD_SRCDIR) && \
+ ./serfmake --with-apr=$(PREFIX)/apr \
+ --with-apr-util=$(PREFIX)/apr \
+ --prefix=$(PREFIX)/serf-old \
+ install
+ touch $@
+
+
+#######################################################################
+# sqlite
+#######################################################################
+
+# Convenience phony targets for sqlite.
+sqlite-retrieve: $(SQLITE_OBJDIR)/.retrieved
+sqlite-configure: $(SQLITE_OBJDIR)/.configured
+sqlite-compile: $(SQLITE_OBJDIR)/.compiled
+sqlite-install: $(SQLITE_OBJDIR)/.installed
+sqlite-reset:
+ $(foreach f, .retrieved .configured .compiled .installed, \
+ rm -f $(SQLITE_OBJDIR)/$(f);)
+
+sqlite-clean:
+ -cd $(SQLITE_OBJDIR) && env MAKEFLAGS= make clean
+
+# fetch distfile for sqlite
+$(DISTDIR)/$(SQLITE_DIST):
+ cd $(DISTDIR) && $(FETCH_CMD) $(SQLITE_URL)
+
+# retrieve sqlite
+$(SQLITE_OBJDIR)/.retrieved: $(DISTDIR)/$(SQLITE_DIST)
+ $(call do_check_sha256,$(SQLITE_DIST))
+ [ -d $(SQLITE_OBJDIR) ] || mkdir -p $(SQLITE_OBJDIR)
+ tar -C $(SRCDIR) -zxf $(DISTDIR)/$(SQLITE_DIST)
+ touch $@
+
+# NOTE(review): THREADSAFE_FLAG is redefined in the ruby section below with
+# --enable/--disable-pthread. GNU make expands recipe variables when the
+# recipe runs, so the *last* definition in the file wins for every recipe —
+# confirm sqlite's configure actually receives the threadsafe flags intended
+# here rather than ruby's pthread flags.
+ifeq ($(THREADING),yes)
+THREADSAFE_FLAG=--enable-threadsafe
+else
+THREADSAFE_FLAG=--disable-threadsafe
+endif
+
+# configure sqlite
+$(SQLITE_OBJDIR)/.configured: $(SQLITE_OBJDIR)/.retrieved
+ cd $(SQLITE_OBJDIR) \
+ && env CFLAGS="-g $(PROFILE_CFLAGS)" GREP="`which grep`" \
+ $(SQLITE_SRCDIR)/configure \
+ --prefix=$(PREFIX)/sqlite \
+ $(THREADSAFE_FLAG)
+ touch $@
+
+# compile sqlite
+$(SQLITE_OBJDIR)/.compiled: $(SQLITE_OBJDIR)/.configured
+ (cd $(SQLITE_OBJDIR) && env MAKEFLAGS= make)
+ touch $@
+
+# install sqlite
+$(SQLITE_OBJDIR)/.installed: $(SQLITE_OBJDIR)/.compiled
+ (cd $(SQLITE_OBJDIR) && env MAKEFLAGS= make install)
+ touch $@
+
+#######################################################################
+# cyrus-sasl
+#######################################################################
+
+# Convenience phony targets for cyrus-sasl.
+cyrus-sasl-retrieve: $(CYRUS_SASL_OBJDIR)/.retrieved
+cyrus-sasl-configure: $(CYRUS_SASL_OBJDIR)/.configured
+cyrus-sasl-compile: $(CYRUS_SASL_OBJDIR)/.compiled
+cyrus-sasl-install: $(CYRUS_SASL_OBJDIR)/.installed
+cyrus-sasl-reset:
+ $(foreach f, .retrieved .configured .compiled .installed, \
+ rm -f $(CYRUS_SASL_OBJDIR)/$(f);)
+
+cyrus-sasl-clean:
+ -(cd $(CYRUS_SASL_OBJDIR) && env MAKEFLAGS= make distclean)
+
+# fetch distfile for cyrus-sasl
+$(DISTDIR)/$(CYRUS_SASL_DIST):
+ cd $(DISTDIR) && $(FETCH_CMD) $(CYRUS_SASL_URL)
+
+# retrieve cyrus-sasl
+# Unpacks the distfile and applies several in-place source fixups via sed
+# (sed-to-temp-then-mv because in-place sed is not portable).
+$(CYRUS_SASL_OBJDIR)/.retrieved: $(DISTDIR)/$(CYRUS_SASL_DIST)
+ $(call do_check_sha256,$(CYRUS_SASL_DIST))
+ [ -d $(CYRUS_SASL_OBJDIR) ] || mkdir -p $(CYRUS_SASL_OBJDIR)
+ tar -C $(SRCDIR) -zxf $(DISTDIR)/$(CYRUS_SASL_DIST)
+ # fixes build on Debian:
+ sed 's/#elif WITH_DES/#elif defined(WITH_DES)/' \
+ < $(CYRUS_SASL_SRCDIR)/plugins/digestmd5.c \
+ > $(CYRUS_SASL_SRCDIR)/plugins/digestmd5.c.patched
+ mv $(CYRUS_SASL_SRCDIR)/plugins/digestmd5.c.patched \
+ $(CYRUS_SASL_SRCDIR)/plugins/digestmd5.c
+ifeq ($(UNAME),OpenBSD)
+ # Fixes GSSAPI support on OpenBSD, which hasn't got libroken:
+ for f in `grep -l -R -- -lroken $(CYRUS_SASL_SRCDIR)`; do \
+ sed -e 's/-lroken//g' < $$f > $$f.tmp && \
+ mv $$f.tmp $$f; \
+ done
+ chmod +x $(CYRUS_SASL_SRCDIR)/configure
+endif
+ # Fixes excessive auth log spam from sasl if broken .la files exist
+ sed 's/SASL_LOG_WARN/SASL_LOG_DEBUG/' \
+ < $(CYRUS_SASL_SRCDIR)/lib/dlopen.c \
+ > $(CYRUS_SASL_SRCDIR)/lib/dlopen.c.patched
+ mv $(CYRUS_SASL_SRCDIR)/lib/dlopen.c.patched \
+ $(CYRUS_SASL_SRCDIR)/lib/dlopen.c
+ # Fix a weird autotools error about missing cmulocal dir
+ (cd $(CYRUS_SASL_SRCDIR)/saslauthd/ && ln -sf ../cmulocal)
+ touch $@
+
+# configure cyrus-sasl
+# Needs bdb and sqlite installed first; CPPFLAGS points at the kerberosV
+# headers (OpenBSD location — harmless elsewhere if the dir is absent).
+$(CYRUS_SASL_OBJDIR)/.configured: $(CYRUS_SASL_OBJDIR)/.retrieved \
+ $(BDB_OBJDIR)/.installed $(SQLITE_OBJDIR)/.installed
+ cd $(CYRUS_SASL_OBJDIR) \
+ && env CFLAGS="-g $(PROFILE_CFLAGS)" \
+ CPPFLAGS="-I/usr/include/kerberosV" \
+ GREP="`which grep`" \
+ $(CYRUS_SASL_SRCDIR)/configure \
+ --with-dbpath=$(PREFIX)/cyrus-sasl/etc/sasldb2 \
+ --with-plugindir=$(PREFIX)/cyrus-sasl/lib/sasl2 \
+ --with-configdir=$(PREFIX)/cyrus-sasl/lib/sasl2 \
+ --with-bdb-libdir=$(PREFIX)/bdb/lib \
+ --with-bdb-incdir=$(PREFIX)/bdb/include \
+ --with-dblib=berkeley \
+ --with-sqlite=$(PREFIX)/sqlite \
+ --prefix=$(PREFIX)/cyrus-sasl
+ touch $@
+
+# compile cyrus-sasl
+$(CYRUS_SASL_OBJDIR)/.compiled: $(CYRUS_SASL_OBJDIR)/.configured
+ (cd $(CYRUS_SASL_OBJDIR) && env MAKEFLAGS= make)
+ touch $@
+
+# install cyrus-sasl
+$(CYRUS_SASL_OBJDIR)/.installed: $(CYRUS_SASL_OBJDIR)/.compiled
+ (cd $(CYRUS_SASL_OBJDIR) && env MAKEFLAGS= make install)
+ touch $@
+
+#######################################################################
+# libmagic
+#######################################################################
+
+# Convenience phony targets for libmagic (the "file" utility's library).
+libmagic-retrieve: $(LIBMAGIC_OBJDIR)/.retrieved
+libmagic-configure: $(LIBMAGIC_OBJDIR)/.configured
+libmagic-compile: $(LIBMAGIC_OBJDIR)/.compiled
+libmagic-install: $(LIBMAGIC_OBJDIR)/.installed
+libmagic-reset:
+ $(foreach f, .retrieved .configured .compiled .installed, \
+ rm -f $(LIBMAGIC_OBJDIR)/$(f);)
+
+libmagic-clean:
+ -(cd $(LIBMAGIC_OBJDIR) && env MAKEFLAGS= make distclean)
+
+# fetch distfile for libmagic
+$(DISTDIR)/$(LIBMAGIC_DIST):
+ cd $(DISTDIR) && $(FETCH_CMD) $(LIBMAGIC_URL)
+
+# retrieve libmagic
+$(LIBMAGIC_OBJDIR)/.retrieved: $(DISTDIR)/$(LIBMAGIC_DIST)
+ $(call do_check_sha256,$(LIBMAGIC_DIST))
+ [ -d $(LIBMAGIC_OBJDIR) ] || mkdir -p $(LIBMAGIC_OBJDIR)
+ tar -C $(SRCDIR) -zxf $(DISTDIR)/$(LIBMAGIC_DIST)
+ touch $@
+
+# configure libmagic
+$(LIBMAGIC_OBJDIR)/.configured: $(LIBMAGIC_OBJDIR)/.retrieved
+ cd $(LIBMAGIC_OBJDIR) \
+ && env CFLAGS="-g $(PROFILE_CFLAGS)" GREP="`which grep`"\
+ $(LIBMAGIC_SRCDIR)/configure \
+ --enable-fsect-man5 \
+ --prefix=$(PREFIX)/libmagic
+ touch $@
+
+# compile libmagic
+$(LIBMAGIC_OBJDIR)/.compiled: $(LIBMAGIC_OBJDIR)/.configured
+ (cd $(LIBMAGIC_OBJDIR) && env MAKEFLAGS= make)
+ touch $@
+
+# install libmagic
+$(LIBMAGIC_OBJDIR)/.installed: $(LIBMAGIC_OBJDIR)/.compiled
+ (cd $(LIBMAGIC_OBJDIR) && env MAKEFLAGS= make install)
+ touch $@
+
+#######################################################################
+# ruby
+#######################################################################
+
+# Convenience phony targets for ruby.
+ruby-retrieve: $(RUBY_OBJDIR)/.retrieved
+ruby-configure: $(RUBY_OBJDIR)/.configured
+ruby-compile: $(RUBY_OBJDIR)/.compiled
+ruby-install: $(RUBY_OBJDIR)/.installed
+ruby-reset:
+ $(foreach f, .retrieved .configured .compiled .installed, \
+ rm -f $(RUBY_OBJDIR)/$(f);)
+
+ruby-clean:
+ -(cd $(RUBY_OBJDIR) && env MAKEFLAGS= make distclean)
+
+# fetch distfile for ruby
+$(DISTDIR)/$(RUBY_DIST):
+ cd $(DISTDIR) && $(FETCH_CMD) $(RUBY_URL)
+
+# retrieve ruby
+# On systems where GNU head is installed as ghead (e.g. BSDs with coreutils
+# from ports), rewrite 'head -c' in ruby's configure to use it.
+$(RUBY_OBJDIR)/.retrieved: $(DISTDIR)/$(RUBY_DIST)
+ $(call do_check_sha256,$(RUBY_DIST))
+ [ -d $(RUBY_OBJDIR) ] || mkdir -p $(RUBY_OBJDIR)
+ tar -C $(SRCDIR) -zxf $(DISTDIR)/$(RUBY_DIST)
+ -which ghead && sed -i -e "s/head -c/ghead -c/" $(RUBY_SRCDIR)/configure
+ touch $@
+
+# NOTE(review): this redefines THREADSAFE_FLAG, previously set in the sqlite
+# section. Make expands recipe variables at execution time, so this (the
+# last) definition is what every recipe sees, including sqlite's configure —
+# consider using distinct variable names. TODO confirm intended behavior.
+ifeq ($(THREADING),yes)
+THREADSAFE_FLAG=--enable-pthread
+else
+THREADSAFE_FLAG=--disable-pthread
+endif
+
+# configure ruby
+# --with-baseruby points at an existing ruby interpreter needed to bootstrap
+# the build; docs/valgrind/gmp are disabled to keep the build lean.
+$(RUBY_OBJDIR)/.configured: $(RUBY_OBJDIR)/.retrieved
+ cd $(RUBY_OBJDIR) \
+ && env CFLAGS="-g $(PROFILE_CFLAGS)" GREP="`which grep`"\
+ $(RUBY_SRCDIR)/configure \
+ --prefix=$(PREFIX)/ruby \
+ --enable-shared \
+ --with-baseruby="$(RUBY)" \
+ $(THREADSAFE_FLAG) \
+ --disable-install-doc \
+ --without-valgrind \
+ --without-gmp
+ touch $@
+
+# compile ruby
+$(RUBY_OBJDIR)/.compiled: $(RUBY_OBJDIR)/.configured
+ (cd $(RUBY_OBJDIR) && env MAKEFLAGS= make)
+ touch $@
+
+# install ruby
+$(RUBY_OBJDIR)/.installed: $(RUBY_OBJDIR)/.compiled
+ (cd $(RUBY_OBJDIR) && env MAKEFLAGS= make install)
+ touch $@
+
+#######################################################################
+# bz2
+#######################################################################
+
+# Convenience phony targets for bz2. bzip2 ships a plain Makefile with no
+# configure step, so there is no bz2-configure target; -fPIC is passed so
+# the static lib can be linked into Python's shared bz2 module below.
+# NOTE(review): bz2-reset also removes a .configured stamp that no rule
+# here ever creates — harmless, but dead.
+bz2-retrieve: $(BZ2_OBJDIR)/.retrieved
+bz2-compile: $(BZ2_OBJDIR)/.compiled
+bz2-install: $(BZ2_OBJDIR)/.installed
+bz2-reset:
+ $(foreach f, .retrieved .configured .compiled .installed, \
+ rm -f $(BZ2_OBJDIR)/$(f);)
+
+bz2-clean:
+ -(cd $(BZ2_SRCDIR) && env MAKEFLAGS= make distclean)
+
+# fetch distfile for bz2
+$(DISTDIR)/$(BZ2_DIST):
+ cd $(DISTDIR) && $(FETCH_CMD) $(BZ2_URL)
+
+# retrieve bz2
+$(BZ2_OBJDIR)/.retrieved: $(DISTDIR)/$(BZ2_DIST)
+ $(call do_check_sha256,$(BZ2_DIST))
+ [ -d $(BZ2_OBJDIR) ] || mkdir -p $(BZ2_OBJDIR)
+ tar -C $(SRCDIR) -zxf $(DISTDIR)/$(BZ2_DIST)
+ touch $@
+
+# compile bz2
+$(BZ2_OBJDIR)/.compiled: $(BZ2_OBJDIR)/.retrieved
+ (cd $(BZ2_SRCDIR) && env MAKEFLAGS= make CFLAGS="-g $(PROFILE_CFLAGS) -fPIC")
+ touch $@
+
+# install bz2
+$(BZ2_OBJDIR)/.installed: $(BZ2_OBJDIR)/.compiled
+ (cd $(BZ2_SRCDIR) && env MAKEFLAGS= make install PREFIX=$(PREFIX)/bz2)
+ touch $@
+
+
+#######################################################################
+# python
+#######################################################################
+
+# Convenience phony targets for python.
+python-retrieve: $(PYTHON_OBJDIR)/.retrieved
+python-configure: $(PYTHON_OBJDIR)/.configured
+python-compile: $(PYTHON_OBJDIR)/.compiled
+python-install: $(PYTHON_OBJDIR)/.installed
+python-reset:
+ $(foreach f, .retrieved .configured .compiled .installed, \
+ rm -f $(PYTHON_OBJDIR)/$(f);)
+
+python-clean:
+ -(cd $(PYTHON_OBJDIR) && env MAKEFLAGS= make distclean)
+
+# fetch distfile for python
+$(DISTDIR)/$(PYTHON_DIST):
+ cd $(DISTDIR) && $(FETCH_CMD) $(PYTHON_URL)
+
+# https://bugs.python.org/issue12560
+$(DISTDIR)/python-issue12560.patch:
+ cd $(DISTDIR) && $(FETCH_CMD) -O "$@" \
+ https://hg.python.org/cpython/raw-rev/32cc37a89b58
+
+# retrieve python
+# Rewrites setup.py so extension modules link against this build's sqlite,
+# bdb and bz2 instead of whatever the host system provides, then applies
+# the upstream issue12560 patch. chmod +x restores the exec bit lost when
+# sed's output redirection created a fresh setup.py.
+$(PYTHON_OBJDIR)/.retrieved: $(DISTDIR)/$(PYTHON_DIST) $(DISTDIR)/python-issue12560.patch
+ $(call do_check_sha256,$(PYTHON_DIST))
+ [ -d $(PYTHON_OBJDIR) ] || mkdir -p $(PYTHON_OBJDIR)
+ tar -C $(SRCDIR) -zxf $(DISTDIR)/$(PYTHON_DIST)
+ # Make setup.py use our own dependencies instead of system ones
+ sed -e "s#sqlite_inc_paths = \[ '/usr/include',#sqlite_inc_paths = [ '$(PREFIX)/sqlite/include',#" \
+ -e "s#'/usr/include/db4'#'$(PREFIX)/bdb/include'#" \
+ -e "s|\(add_dir_to_list(self.compiler.library_dirs, '/usr/local/lib')\)|pass #\1|" \
+ -e "s|\(add_dir_to_list(self.compiler.include_dirs, '/usr/local/include')\)|pass #\1|" \
+ -e "s#find_library_file(lib_dirs, 'bz2'#find_library_file(['$(PREFIX)/bz2/lib'] + lib_dirs, 'bz2'#" \
+ < $(PYTHON_SRCDIR)/setup.py \
+ > $(PYTHON_SRCDIR)/setup.py.patched
+ mv $(PYTHON_SRCDIR)/setup.py.patched $(PYTHON_SRCDIR)/setup.py
+ chmod +x $(PYTHON_SRCDIR)/setup.py
+ cd $(PYTHON_SRCDIR) && patch -p1 < $(DISTDIR)/python-issue12560.patch
+ touch $@
+
+# configure python
+# Profiling support is opt-in via the PROFILE make variable.
+ifdef PROFILE
+PYTHON_PROFILING=--enable-profiling
+endif
+$(PYTHON_OBJDIR)/.configured: $(PYTHON_OBJDIR)/.retrieved \
+ $(BZ2_OBJDIR)/.installed
+ cd $(PYTHON_OBJDIR) \
+ && env CFLAGS="-g $(PROFILE_CFLAGS)" GREP="`which grep`" \
+ CPPFLAGS="-I$(PREFIX)/bz2/include" \
+ LDFLAGS="-Wl,-rpath=$(PREFIX)/python/lib -L$(PREFIX)/bz2/lib" \
+ LD_LIBRARY_PATH="$(LD_LIBRARY_PATH):$$LD_LIBRARY_PATH" \
+ $(PYTHON_SRCDIR)/configure \
+ --prefix=$(PREFIX)/python \
+ --enable-shared \
+ --with-system-expat \
+ --with-dbmliborder=bdb \
+ $(PYTHON_PROFILING)
+ touch $@
+
+# compile python
+$(PYTHON_OBJDIR)/.compiled: $(PYTHON_OBJDIR)/.configured
+ (cd $(PYTHON_OBJDIR) && \
+ env MAKEFLAGS= \
+ LD_LIBRARY_PATH="$(LD_LIBRARY_PATH):$$LD_LIBRARY_PATH" \
+ make)
+ touch $@
+
+# install python
+$(PYTHON_OBJDIR)/.installed: $(PYTHON_OBJDIR)/.compiled
+ (cd $(PYTHON_OBJDIR) && \
+ env MAKEFLAGS= \
+ LD_LIBRARY_PATH="$(LD_LIBRARY_PATH):$$LD_LIBRARY_PATH" \
+ make install)
+ touch $@
+
+
+#######################################################################
+# junit
+#######################################################################
+
+# fetch distfile for junit
+# junit is only downloaded (it is passed to svn's configure as a jar), so
+# the checksum check runs here at fetch time rather than in a .retrieved
+# step as for the other packages.
+$(DISTDIR)/$(JUNIT_DIST):
+ cd $(DISTDIR) && $(FETCH_CMD) $(JUNIT_URL)
+ $(call do_check_sha256,$(JUNIT_DIST))
+
+
+#######################################################################
+# gettext
+#######################################################################
+
+# Convenience phony targets for gettext.
+gettext-retrieve: $(GETTEXT_OBJDIR)/.retrieved
+gettext-configure: $(GETTEXT_OBJDIR)/.configured
+gettext-compile: $(GETTEXT_OBJDIR)/.compiled
+gettext-install: $(GETTEXT_OBJDIR)/.installed
+gettext-reset:
+ $(foreach f, .retrieved .configured .compiled .installed, \
+ rm -f $(GETTEXT_OBJDIR)/$(f);)
+
+gettext-clean:
+ -(cd $(GETTEXT_SRCDIR) && env MAKEFLAGS= make clean)
+
+# fetch distfile for gettext
+$(DISTDIR)/$(GETTEXT_DIST):
+ cd $(DISTDIR) && $(FETCH_CMD) $(GETTEXT_URL)
+
+# retrieve gettext
+$(GETTEXT_OBJDIR)/.retrieved: $(DISTDIR)/$(GETTEXT_DIST)
+ $(call do_check_sha256,$(GETTEXT_DIST))
+ [ -d $(GETTEXT_OBJDIR) ] || mkdir -p $(GETTEXT_OBJDIR)
+ tar -C $(SRCDIR) -zxf $(DISTDIR)/$(GETTEXT_DIST)
+ touch $@
+
+# (gettext won't compile outside its source tree)
+# configure gettext
+# If GNU sed is installed as gsed, rewrite the moopp helper to use it;
+# the '-' prefix tolerates absence of gsed.
+$(GETTEXT_OBJDIR)/.configured: $(GETTEXT_OBJDIR)/.retrieved
+ cd $(GETTEXT_SRCDIR) \
+ && env CFLAGS="-g $(PROFILE_CFLAGS)" GREP="`which grep`"\
+ LDFLAGS="-L$(PREFIX)/iconv/lib" \
+ $(GETTEXT_SRCDIR)/configure \
+ --prefix=$(PREFIX)/gettext \
+ --with-libiconv-prefix=$(PREFIX)/iconv \
+ --disable-c++ \
+ --disable-java \
+ --disable-csharp \
+ $(THREADS_FLAG)
+ -which gsed && \
+ sed -e 's/sed /gsed /g' < $(GETTEXT_SRCDIR)/build-aux/moopp \
+ > $(GETTEXT_SRCDIR)/build-aux/moopp.fixed && \
+ mv $(GETTEXT_SRCDIR)/build-aux/moopp.fixed \
+ $(GETTEXT_SRCDIR)/build-aux/moopp && \
+ chmod +x $(GETTEXT_SRCDIR)/build-aux/moopp
+ touch $@
+
+# compile gettext
+$(GETTEXT_OBJDIR)/.compiled: $(GETTEXT_OBJDIR)/.configured
+ (cd $(GETTEXT_SRCDIR) && env MAKEFLAGS= make)
+ touch $@
+
+# install gettext
+$(GETTEXT_OBJDIR)/.installed: $(GETTEXT_OBJDIR)/.compiled
+ (cd $(GETTEXT_SRCDIR) && env MAKEFLAGS= make install)
+ touch $@
+
+#######################################################################
+# lz4
+#######################################################################
+
+# Convenience phony targets for lz4. lz4 has no configure step — the
+# .configured stamp exists only to keep the target naming uniform.
+lz4-retrieve: $(LZ4_OBJDIR)/.retrieved
+lz4-configure: $(LZ4_OBJDIR)/.configured
+lz4-compile: $(LZ4_OBJDIR)/.compiled
+lz4-install: $(LZ4_OBJDIR)/.installed
+lz4-reset:
+ $(foreach f, .retrieved .configured .compiled .installed, \
+ rm -f $(LZ4_OBJDIR)/$(f);)
+
+lz4-clean:
+ -(cd $(LZ4_SRCDIR) && env MAKEFLAGS= $(MAKE) clean)
+
+# fetch distfile for lz4
+# -O names the output explicitly (the URL's basename differs from the
+# distfile name used elsewhere in this Makefile).
+$(DISTDIR)/$(LZ4_DIST):
+ cd $(DISTDIR) && $(FETCH_CMD) -O $(LZ4_DIST) $(LZ4_URL)
+
+# retrieve lz4
+$(LZ4_OBJDIR)/.retrieved: $(DISTDIR)/$(LZ4_DIST)
+ $(call do_check_sha256,$(LZ4_DIST))
+ [ -d $(LZ4_OBJDIR) ] || mkdir -p $(LZ4_OBJDIR)
+ tar -C $(SRCDIR) -zxf $(DISTDIR)/$(LZ4_DIST)
+ touch $@
+
+# configure lz4
+$(LZ4_OBJDIR)/.configured: $(LZ4_OBJDIR)/.retrieved
+ touch $@
+
+# compile lz4 (only the library subdir is built)
+$(LZ4_OBJDIR)/.compiled: $(LZ4_OBJDIR)/.configured
+ (cd $(LZ4_SRCDIR)/lib && \
+ env MAKEFLAGS= $(MAKE) PREFIX=$(PREFIX)/lz4)
+ touch $@
+
+# install lz4
+$(LZ4_OBJDIR)/.installed: $(LZ4_OBJDIR)/.compiled
+ mkdir -p $(PREFIX)/lz4/lib
+ (cd $(LZ4_SRCDIR)/lib && \
+ env MAKEFLAGS= $(MAKE) PREFIX=$(PREFIX)/lz4 install)
+ touch $@
+
+#######################################################################
+# svn
+#######################################################################
+
+.PHONY: svn-configure svn-compile svn-install svn-bindings-compile \
+ svn-bindings-install svn-bindings-reset svn-clean
+
+svn-install-all: svn-install svn-bindings-install
+
+# Convenience phony targets for Subversion itself, plus separate stamps for
+# the language bindings so they can be rebuilt independently of the core.
+svn-retrieve: $(SVN_OBJDIR)/.retrieved
+svn-configure: $(SVN_OBJDIR)/.configured
+svn-compile: $(SVN_OBJDIR)/.compiled
+svn-bindings-compile: $(SVN_OBJDIR)/.bindings-compiled
+svn-install: $(SVN_OBJDIR)/.installed
+svn-bindings-install: $(SVN_OBJDIR)/.bindings-installed
+svn-bindings-reset:
+ $(foreach f, .bindings-compiled .bindings-installed, \
+ rm -f $(SVN_OBJDIR)/$(f);)
+svn-reset: svn-bindings-reset
+ $(foreach f, .retrieved .configured .compiled .installed \
+ .bindings-compiled .bindings-installed, \
+ rm -f $(SVN_OBJDIR)/$(f);)
+
+svn-clean:
+ -(cd $(svn_builddir) && env MAKEFLAGS= make distclean)
+
+# retrieve svn if not present yet
+# Tags are fetched with 'svn export' (read-only snapshot); branches and
+# trunk with 'svn co' so the working copy can be updated later. The
+# existing-dir/symlink check makes this safe to re-run.
+$(SVN_OBJDIR)/.retrieved:
+ [ -d $(SVN_OBJDIR) ] || mkdir -p $(SVN_OBJDIR)
+ if [ "$(TAG)" != "none" ]; then \
+ branchdir="tags/$(TAG)"; \
+ co="export"; \
+ elif [ $(BRANCH) != trunk ]; then \
+ branchdir="branches/$(BRANCH)"; \
+ co="co"; \
+ else \
+ branchdir="$(BRANCH)"; \
+ co="co"; \
+ fi; \
+ if [ ! -d $(SVN_WC) ] && [ ! -h $(SVN_WC) ]; then \
+ svn $${co} $(SUBVERSION_REPOS_URL)/$${branchdir} \
+ $(SVN_WC); \
+ fi
+ touch $@
+
+# Select configure flags per Subversion release line. Older lines (1.5/1.6)
+# still use neon and the old serf; 1.8+ use scons-built serf (which needs
+# explicit rpath flags); 1.10+ add lz4 and utf8proc.
+ifeq ($(BRANCH_MAJOR),1.7)
+BDB_FLAG=db.h:$(PREFIX)/bdb/include:$(PREFIX)/bdb/lib:db-$(BDB_MAJOR_VER)
+SERF_FLAG=--with-serf="$(PREFIX)/serf"
+SERF_LDFLAG=-Wl,-rpath,$(PREFIX)/serf/lib -Wl,-rpath,$(PREFIX)/bdb/lib
+MOD_DAV_SVN=modules/svn-$(WC)/mod_dav_svn.so
+MOD_AUTHZ_SVN=modules/svn-$(WC)/mod_authz_svn.so
+MOD_DONTDOTHAT=modules/svn-$(WC)/mod_dontdothat.so
+LIBMAGIC_FLAG=--with-libmagic=$(PREFIX)/libmagic
+NEON_FLAG=--with-neon="$(PREFIX)/neon"
+JAVAHL_CHECK_TARGET=check-javahl
+else ifeq ($(BRANCH_MAJOR),1.6)
+BDB_FLAG=db.h:$(PREFIX)/bdb/include:$(PREFIX)/bdb/lib:db-$(BDB_MAJOR_VER)
+SERF_FLAG=--with-serf="$(PREFIX)/serf"
+SERF_LDFLAG=-Wl,-rpath,$(PREFIX)/serf/lib -Wl,-rpath,$(PREFIX)/bdb/lib
+MOD_DAV_SVN=modules/svn-$(WC)/mod_dav_svn.so
+MOD_AUTHZ_SVN=modules/svn-$(WC)/mod_authz_svn.so
+MOD_DONTDOTHAT=modules/svn-$(WC)/mod_dontdothat.so
+W_NO_SYSTEM_HEADERS=-Wno-system-headers
+NEON_FLAG=--with-neon="$(PREFIX)/neon"
+JAVAHL_CHECK_TARGET=check-javahl
+else ifeq ($(BRANCH_MAJOR),1.5)
+BDB_FLAG=$(PREFIX)/bdb
+SERF_FLAG=--with-serf="$(PREFIX)/serf-old"
+MOD_DAV_SVN=modules/mod_dav_svn.so
+MOD_AUTHZ_SVN=modules/mod_authz_svn.so
+MOD_DONTDOTHAT=modules/mod_dontdothat.so
+DISABLE_NEON_VERSION_CHECK=--disable-neon-version-check
+W_NO_SYSTEM_HEADERS=-Wno-system-headers
+NEON_FLAG=--with-neon="$(PREFIX)/neon"
+JAVAHL_CHECK_TARGET=check-javahl
+else ifeq ($(BRANCH_MAJOR), $(filter $(BRANCH_MAJOR), 1.8 1.9))
+BDB_FLAG=db.h:$(PREFIX)/bdb/include:$(PREFIX)/bdb/lib:db-$(BDB_MAJOR_VER)
+SERF_FLAG=--with-serf="$(PREFIX)/serf"
+# serf >= 1.3.0 is built with scons and no longer sets up rpath linker flags,
+# so we have to do that ourselves :(
+SERF_LDFLAG=-Wl,-rpath,$(PREFIX)/serf/lib -Wl,-rpath,$(PREFIX)/bdb/lib
+MOD_DAV_SVN=modules/svn-$(WC)/mod_dav_svn.so
+MOD_AUTHZ_SVN=modules/svn-$(WC)/mod_authz_svn.so
+MOD_DONTDOTHAT=modules/svn-$(WC)/mod_dontdothat.so
+LIBMAGIC_FLAG=--with-libmagic=$(PREFIX)/libmagic
+JAVAHL_CHECK_TARGET=check-all-javahl
+else # 1.10
+BDB_FLAG=db.h:$(PREFIX)/bdb/include:$(PREFIX)/bdb/lib:db-$(BDB_MAJOR_VER)
+SERF_FLAG=--with-serf="$(PREFIX)/serf"
+# serf >= 1.3.0 is built with scons and no longer sets up rpath linker flags,
+# so we have to do that ourselves :(
+SERF_LDFLAG=-Wl,-rpath,$(PREFIX)/serf/lib -Wl,-rpath,$(PREFIX)/bdb/lib
+MOD_DAV_SVN=modules/svn-$(WC)/mod_dav_svn.so
+MOD_AUTHZ_SVN=modules/svn-$(WC)/mod_authz_svn.so
+MOD_DONTDOTHAT=modules/svn-$(WC)/mod_dontdothat.so
+LIBMAGIC_FLAG=--with-libmagic=$(PREFIX)/libmagic
+JAVAHL_CHECK_TARGET=check-all-javahl
+LZ4_FLAG=--with-lz4=$(PREFIX)/lz4
+UTF8PROC_FLAG=--with-utf8proc=internal
+endif
+
+# Java bindings are optional; when disabled, tell configure not to look for
+# a JDK at all.
+ifeq ($(ENABLE_JAVA_BINDINGS),yes)
+ JAVAHL_FLAG=--enable-javahl=yes --with-jdk --with-jikes=no \
+ --with-junit=$(DISTDIR)/$(JUNIT_DIST)
+else
+ JAVAHL_FLAG=--with-jdk=no
+endif
+
+# Profiling builds are fully static and skip httpd/sasl integration;
+# regular builds are shared and wire up apxs and cyrus-sasl.
+ifdef PROFILE
+SVN_STATIC_FLAG=--enable-all-static
+else
+SVN_STATIC_FLAG=--disable-static
+SVN_WITH_HTTPD=--with-apxs="$(PREFIX)/httpd/bin/apxs" \
+ --with-apache-libexecdir="$(PREFIX)/httpd/modules/svn-$(WC)"
+SVN_WITH_SASL=--with-sasl="$(PREFIX)/cyrus-sasl"
+endif
+
+# configure svn
+# Requires every dependency built above; all paths point into $(PREFIX) so
+# the build is self-contained and independent of system libraries.
+$(SVN_OBJDIR)/.configured: $(SVN_OBJDIR)/.retrieved $(DISTDIR)/$(JUNIT_DIST) \
+ $(APR_OBJDIR)/.installed $(APR_UTIL_OBJDIR)/.installed \
+ $(BDB_OBJDIR)/.installed $(SQLITE_OBJDIR)/.installed \
+ $(HTTPD_OBJDIR)/.installed $(CYRUS_SASL_OBJDIR)/.installed \
+ $(LIBMAGIC_OBJDIR)/.installed $(NEON_OBJDIR)/.installed \
+ $(SERF_OBJDIR)/.installed $(SERF_OLD_OBJDIR)/.installed \
+ $(RUBY_OBJDIR)/.installed $(PYTHON_OBJDIR)/.installed
+ cd $(SVN_SRCDIR) && ./autogen.sh
+ cd $(svn_builddir) && \
+ env LDFLAGS="-L$(PREFIX)/neon/lib -L$(PREFIX)/apr/lib $(SERF_LDFLAG) -L$(PREFIX)/gettext/lib -L$(PREFIX)/iconv/lib" \
+ CFLAGS="-I$(PREFIX)/gettext/include -DAPR_POOL_DEBUG" \
+ CXXFLAGS="-I$(PREFIX)/gettext/include -DAPR_POOL_DEBUG" \
+ LD_LIBRARY_PATH="$(LD_LIBRARY_PATH):$$LD_LIBRARY_PATH" \
+ GREP="`which grep`" \
+ PATH=$(PREFIX)/ruby/bin:$(PREFIX)/python/bin:$(PREFIX)/gettext/bin:$$PATH \
+ $(SVN_SRCDIR)/configure \
+ --enable-maintainer-mode \
+ --prefix="$(SVN_PREFIX)" \
+ --with-apr="$(PREFIX)/apr" \
+ --with-apr-util="$(PREFIX)/apr" \
+ $(NEON_FLAG) \
+ $(SVN_WITH_HTTPD) \
+ $(SVN_WITH_SASL) \
+ $(SERF_FLAG) \
+ --with-sqlite="$(PREFIX)/sqlite" \
+ --with-zlib="/usr" \
+ --without-gnome-keyring \
+ --with-berkeley-db="$(BDB_FLAG)" \
+ --with-ruby-sitedir="$(SVN_PREFIX)/lib/ruby/site_ruby" \
+ --disable-mod-activation \
+ $(JAVAHL_FLAG) \
+ $(LIBMAGIC_FLAG) \
+ $(LZ4_FLAG) \
+ $(UTF8PROC_FLAG) \
+ $(SVN_STATIC_FLAG) \
+ $(DISABLE_NEON_VERSION_CHECK)
+ touch $@
+
+# compile svn
+$(SVN_OBJDIR)/.compiled: $(SVN_OBJDIR)/.configured
+ cd $(svn_builddir) \
+ && env MAKEFLAGS= make EXTRA_CFLAGS="$(PROFILE_CFLAGS) $(W_NO_SYSTEM_HEADERS)"
+ touch $@
+
+# install svn
+$(SVN_OBJDIR)/.installed: $(SVN_OBJDIR)/.compiled
+ cd $(svn_builddir) \
+ && env MAKEFLAGS= make install install-tools
+ touch $@
+
+# SWIG 1.x and 2.x are not compatible. If SWIG 2.x is used to generated .swg
+# files and 1.x is used to build the bindings, the Python bindings fail to
+# load with errors such as "undefined symbol 'SWIG_Python_str_AsChar'".
+# So clean any pre-generated .swg files to make sure everything is done
+# by the same version of SWIG.
+$(SVN_OBJDIR)/.pre-generated-swig-cleaned:
+ -cd $(svn_builddir) \
+ && env MAKEFLAGS= make clean-swig
+ touch $@
+
+# compile the swig (python/ruby/perl) and javahl bindings; perl and java
+# are conditional on the corresponding ENABLE_* variables.
+$(SVN_OBJDIR)/.bindings-compiled: $(SVN_OBJDIR)/.installed $(SVN_OBJDIR)/.pre-generated-swig-cleaned
+ cd $(svn_builddir) \
+ && env LD_LIBRARY_PATH=$(LD_LIBRARY_PATH) \
+ env MAKEFLAGS= make swig-py
+ cd $(svn_builddir) && \
+ env PATH=$(PREFIX)/ruby/bin:$$PATH \
+ LD_LIBRARY_PATH=$(LD_LIBRARY_PATH) env MAKEFLAGS= make swig-rb
+ if [ $(ENABLE_PERL_BINDINGS) = yes ]; then \
+ cd $(svn_builddir) \
+ && env LD_LIBRARY_PATH=$(LD_LIBRARY_PATH) \
+ env MAKEFLAGS= make swig-pl; \
+ fi
+ if [ $(ENABLE_JAVA_BINDINGS) = yes ]; then \
+ cd $(svn_builddir) \
+ && env MAKEFLAGS= make javahl; \
+ fi
+ touch $@
+
+# install the bindings; the perl step runs Makefile.PL so the modules land
+# under $(SVN_PREFIX) rather than the system perl sitedir.
+$(SVN_OBJDIR)/.bindings-installed: $(SVN_OBJDIR)/.bindings-compiled
+ cd $(svn_builddir) \
+ && env LD_LIBRARY_PATH=$(LD_LIBRARY_PATH) \
+ env MAKEFLAGS= make install-swig-py
+ cd $(svn_builddir) && \
+ env PATH=$(PREFIX)/ruby/bin:$$PATH \
+ LD_LIBRARY_PATH=$(LD_LIBRARY_PATH) env MAKEFLAGS= make install-swig-rb
+ if [ $(ENABLE_PERL_BINDINGS) = yes ]; then \
+ cd $(svn_builddir) \
+ && env MAKEFLAGS= make install-swig-pl-lib; \
+ cd subversion/bindings/swig/perl/native \
+ && perl Makefile.PL PREFIX="$(SVN_PREFIX)" \
+ && env MAKEFLAGS= make install; \
+ fi
+ if [ $(ENABLE_JAVA_BINDINGS) = yes ]; then \
+ cd $(svn_builddir) \
+ && env MAKEFLAGS= make install-javahl; \
+ fi
+ touch $@
+
+# run svn regression tests
+# Paths and ports for the httpd instances used by "make check" over DAV;
+# the proxy config/port support write-through-proxy test setups.
+HTTPD_CHECK_CONF=$(PREFIX)/httpd/conf/httpd-svn-check-$(WC).conf
+HTTPD_PROXY_CONF=$(PREFIX)/httpd/conf/httpd-svn-proxy-$(WC).conf
+HTTPD_CHECK_USERS=$(PREFIX)/httpd/conf/httpd-svn-check-users
+HTTPD_CHECK_GROUPS=$(PREFIX)/httpd/conf/httpd-svn-check-groups
+HTTPD_CHECK_PORT=8081
+HTTPD_PROXY_PORT=8082
+MOD_DONTDOTHAT_CONF=$(PREFIX)/httpd/conf/dontdothat
+
+# Minimal mod_dontdothat config: deny all recursive actions.
+$(MOD_DONTDOTHAT_CONF):
+ mkdir -p $(dir $@)
+ echo > $@.tmp '[recursive-actions]'
+ echo >>$@.tmp '/ = deny'
+ mv -f $@.tmp $@
+
+# Group file used by the authz tests.
+$(HTTPD_CHECK_GROUPS):
+ mkdir -p $(dir $@)
+ printf "random: jrandom\nconstant: jconstant\n" > $@
+
+$(HTTPD_CHECK_CONF): $(MOD_DONTDOTHAT_CONF) $(HTTPD_CHECK_GROUPS)
+ mkdir -p $(dir $@)
+ $(PREFIX)/httpd/bin/htpasswd -bc $(HTTPD_CHECK_USERS).tmp jrandom rayjandom
+ $(PREFIX)/httpd/bin/htpasswd -b $(HTTPD_CHECK_USERS).tmp jconstant rayjandom
+ $(PREFIX)/httpd/bin/htpasswd -b $(HTTPD_CHECK_USERS).tmp __dumpster__ __loadster__
+ $(PREFIX)/httpd/bin/htpasswd -b $(HTTPD_CHECK_USERS).tmp JRANDOM rayjandom
+ $(PREFIX)/httpd/bin/htpasswd -b $(HTTPD_CHECK_USERS).tmp JCONSTANT rayjandom
+ mv -f $(HTTPD_CHECK_USERS).tmp $(HTTPD_CHECK_USERS)
+ echo > $@.tmp '# httpd config for make check'
+ echo >>$@.tmp 'ServerRoot "$(PREFIX)/httpd"'
+ echo >>$@.tmp 'Listen localhost:$(HTTPD_CHECK_PORT)'
+ echo >>$@.tmp 'LoadModule dav_svn_module $(MOD_DAV_SVN)'
+ echo >>$@.tmp 'LoadModule authz_svn_module $(MOD_AUTHZ_SVN)'
+ echo >>$@.tmp 'LoadModule dontdothat_module $(MOD_DONTDOTHAT)'
+ echo >>$@.tmp 'DocumentRoot "$(PREFIX)/httpd/htdocs"'
+ echo >>$@.tmp '# These two Locations are used for "make check"'
+ echo >>$@.tmp '<Directory />'
+ echo >>$@.tmp ' Options FollowSymLinks'
+ echo >>$@.tmp ' AllowOverride None'
+ echo >>$@.tmp ' Order deny,allow'
+ echo >>$@.tmp ' Allow from all'
+ echo >>$@.tmp '</Directory>'
+ echo >>$@.tmp '<Location /svn-test-work/repositories>'
+ echo >>$@.tmp ' DAV svn'
+ echo >>$@.tmp ' SVNParentPath $(SVN_WC)/subversion/tests/cmdline/svn-test-work/repositories'
+ echo >>$@.tmp ' AuthzSVNAccessFile $(SVN_WC)/subversion/tests/cmdline/svn-test-work/authz'
+ echo >>$@.tmp ' AuthType Basic'
+ echo >>$@.tmp ' AuthName "Subversion Repository"'
+ echo >>$@.tmp ' AuthUserFile $(HTTPD_CHECK_USERS)'
+ echo >>$@.tmp ' Require valid-user'
+ifeq ($(USE_HTTPV1),yes)
+ echo >>$@.tmp ' SVNAdvertiseV2Protocol off'
+endif
+ifeq ($(USE_AUTHZ_SHORT_CIRCUIT),yes)
+ echo >>$@.tmp ' SVNPathAuthz short_circuit'
+endif
+ echo >>$@.tmp '</Location>'
+ echo >>$@.tmp '<Location /svn-test-work/local_tmp/repos>'
+ echo >>$@.tmp ' DAV svn'
+ echo >>$@.tmp ' SVNPath $(SVN_WC)/subversion/tests/cmdline/svn-test-work/local_tmp/repos'
+ echo >>$@.tmp ' AuthzSVNAccessFile $(SVN_WC)/subversion/tests/cmdline/svn-test-work/authz'
+ echo >>$@.tmp ' AuthType Basic'
+ echo >>$@.tmp ' AuthName "Subversion Repository"'
+ echo >>$@.tmp ' AuthUserFile $(HTTPD_CHECK_USERS)'
+ echo >>$@.tmp ' Require valid-user'
+ifeq ($(USE_HTTPV1),yes)
+ echo >>$@.tmp ' SVNAdvertiseV2Protocol off'
+endif
+ifeq ($(USE_AUTHZ_SHORT_CIRCUIT),yes)
+ echo >>$@.tmp ' SVNPathAuthz short_circuit'
+endif
+ echo >>$@.tmp '</Location>'
+ echo >>$@.tmp '# This Location lets you access repositories dropped in /tmp/'
+ echo >>$@.tmp '<Location /svn>'
+ echo >>$@.tmp ' DAV svn'
+ echo >>$@.tmp ' SVNParentPath /tmp'
+ echo >>$@.tmp ' Allow from all'
+ echo >>$@.tmp ' #AuthType Basic'
+ echo >>$@.tmp ' #AuthName "Subversion Repository"'
+ echo >>$@.tmp ' #AuthUserFile $(HTTPD_CHECK_USERS)'
+ echo >>$@.tmp ' #Require valid-user'
+ifeq ($(USE_HTTPV1),yes)
+ echo >> $@.tmp ' SVNAdvertiseV2Protocol off'
+endif
+ifeq ($(USE_AUTHZ_SHORT_CIRCUIT),yes)
+ echo >> $@.tmp ' SVNPathAuthz short_circuit'
+endif
+ echo >>$@.tmp '</Location>'
+ echo >>$@.tmp '# Location for tests using mod_dontdothat'
+ echo >>$@.tmp '<Location /ddt-test-work/repositories>'
+ echo >> $@.tmp 'DAV svn'
+ echo >> $@.tmp 'SVNParentPath $(SVN_WC)/subversion/tests/cmdline/svn-test-work/repositories'
+ echo >> $@.tmp 'AuthzSVNAccessFile $(SVN_WC)/subversion/tests/cmdline/svn-test-work/authz'
+ echo >> $@.tmp 'AuthType Basic'
+ echo >> $@.tmp 'AuthName "Subversion Repository"'
+ echo >> $@.tmp 'AuthzSVNAccessFile $(SVN_WC)/subversion/tests/cmdline/svn-test-work/authz'
+ echo >> $@.tmp 'AuthUserFile $(HTTPD_CHECK_USERS)'
+ echo >> $@.tmp 'Require valid-user'
+ifeq ($(USE_HTTPV1),yes)
+ echo >> $@.tmp ' SVNAdvertiseV2Protocol off'
+endif
+ echo >> $@.tmp 'DontDoThatConfigFile "$(MOD_DONTDOTHAT_CONF)"'
+ echo >> $@.tmp '</Location>'
+
+ echo >>$@.tmp '# Several locations for mod_authz_svn test follow'
+ echo >>$@.tmp '<Location /authz-test-work/anon>'
+ echo >>$@.tmp ' DAV svn'
+ echo >>$@.tmp ' SVNParentPath $(SVN_WC)/subversion/tests/cmdline/svn-test-work/local_tmp'
+ echo >>$@.tmp ' AuthzSVNAccessFile $(SVN_WC)/subversion/tests/cmdline/svn-test-work/authz'
+ifeq ($(USE_HTTPV1),yes)
+ echo >>$@.tmp ' SVNAdvertiseV2Protocol off'
+endif
+ echo >>$@.tmp ' SVNListParentPath On'
+ echo >>$@.tmp ' <IfModule mod_authz_core.c>'
+ echo >>$@.tmp ' Require all granted'
+ echo >>$@.tmp ' </IfModule>'
+ echo >>$@.tmp ' <IfModule !mod_authz_core.c>'
+ echo >>$@.tmp ' Allow from all'
+ echo >>$@.tmp ' </IfModule>'
+ifeq ($(USE_AUTHZ_SHORT_CIRCUIT),yes)
+ echo >>$@.tmp ' SVNPathAuthz short_circuit'
+endif
+ echo >>$@.tmp '</Location>'
+ echo >>$@.tmp '<Location /authz-test-work/mixed>'
+ echo >>$@.tmp ' DAV svn'
+ echo >>$@.tmp ' SVNParentPath $(SVN_WC)/subversion/tests/cmdline/svn-test-work/local_tmp'
+ echo >>$@.tmp ' AuthzSVNAccessFile $(SVN_WC)/subversion/tests/cmdline/svn-test-work/authz'
+ifeq ($(USE_HTTPV1),yes)
+ echo >>$@.tmp ' SVNAdvertiseV2Protocol off'
+endif
+ echo >>$@.tmp ' SVNListParentPath On'
+ echo >>$@.tmp ' AuthType Basic'
+ echo >>$@.tmp ' AuthName "Subversion Repository"'
+ echo >>$@.tmp ' AuthUserFile $(HTTPD_CHECK_USERS)'
+ echo >>$@.tmp ' Require valid-user'
+ echo >>$@.tmp ' Satisfy Any'
+ifeq ($(USE_AUTHZ_SHORT_CIRCUIT),yes)
+ echo >>$@.tmp ' SVNPathAuthz short_circuit'
+endif
+ echo >>$@.tmp '</Location>'
+ echo >>$@.tmp '<Location /authz-test-work/mixed-noauthwhenanon>'
+ echo >>$@.tmp ' DAV svn'
+ echo >>$@.tmp ' SVNParentPath $(SVN_WC)/subversion/tests/cmdline/svn-test-work/local_tmp'
+ echo >>$@.tmp ' AuthzSVNAccessFile $(SVN_WC)/subversion/tests/cmdline/svn-test-work/authz'
+ifeq ($(USE_HTTPV1),yes)
+ echo >>$@.tmp ' SVNAdvertiseV2Protocol off'
+endif
+ echo >>$@.tmp ' SVNListParentPath On'
+ echo >>$@.tmp ' AuthType Basic'
+ echo >>$@.tmp ' AuthName "Subversion Repository"'
+ echo >>$@.tmp ' AuthUserFile $(HTTPD_CHECK_USERS)'
+ echo >>$@.tmp ' Require valid-user'
+ echo >>$@.tmp ' AuthzSVNNoAuthWhenAnonymousAllowed On'
+ echo >>$@.tmp ' SVNPathAuthz On'
+ifeq ($(USE_AUTHZ_SHORT_CIRCUIT),yes)
+ echo >>$@.tmp ' SVNPathAuthz short_circuit'
+endif
+ echo >>$@.tmp '</Location>'
+ echo >>$@.tmp '<Location /authz-test-work/authn>'
+ echo >>$@.tmp ' DAV svn'
+ echo >>$@.tmp ' SVNParentPath $(SVN_WC)/subversion/tests/cmdline/svn-test-work/local_tmp'
+ echo >>$@.tmp ' AuthzSVNAccessFile $(SVN_WC)/subversion/tests/cmdline/svn-test-work/authz'
+ifeq ($(USE_HTTPV1),yes)
+ echo >>$@.tmp ' SVNAdvertiseV2Protocol off'
+endif
+ echo >>$@.tmp ' SVNListParentPath On'
+ echo >>$@.tmp ' AuthType Basic'
+ echo >>$@.tmp ' AuthName "Subversion Repository"'
+ echo >>$@.tmp ' AuthUserFile $(HTTPD_CHECK_USERS)'
+ echo >>$@.tmp ' Require valid-user'
+ifeq ($(USE_AUTHZ_SHORT_CIRCUIT),yes)
+ echo >>$@.tmp ' SVNPathAuthz short_circuit'
+endif
+ echo >>$@.tmp '</Location>'
+ echo >>$@.tmp '<Location /authz-test-work/authn-anonoff>'
+ echo >>$@.tmp ' DAV svn'
+ echo >>$@.tmp ' SVNParentPath $(SVN_WC)/subversion/tests/cmdline/svn-test-work/local_tmp'
+ echo >>$@.tmp ' AuthzSVNAccessFile $(SVN_WC)/subversion/tests/cmdline/svn-test-work/authz'
+ifeq ($(USE_HTTPV1),yes)
+ echo >>$@.tmp ' SVNAdvertiseV2Protocol off'
+endif
+ echo >>$@.tmp ' SVNListParentPath On'
+ echo >>$@.tmp ' AuthType Basic'
+ echo >>$@.tmp ' AuthName "Subversion Repository"'
+ echo >>$@.tmp ' AuthUserFile $(HTTPD_CHECK_USERS)'
+ echo >>$@.tmp ' Require valid-user'
+ echo >>$@.tmp ' AuthzSVNAnonymous Off'
+ echo >>$@.tmp ' SVNPathAuthz On'
+ echo >>$@.tmp '</Location>'
+ echo >>$@.tmp '<Location /authz-test-work/authn-lcuser>'
+ echo >>$@.tmp ' DAV svn'
+ echo >>$@.tmp ' SVNParentPath $(SVN_WC)/subversion/tests/cmdline/svn-test-work/local_tmp'
+ echo >>$@.tmp ' AuthzSVNAccessFile $(SVN_WC)/subversion/tests/cmdline/svn-test-work/authz'
+ifeq ($(USE_HTTPV1),yes)
+ echo >>$@.tmp ' SVNAdvertiseV2Protocol off'
+endif
+ echo >>$@.tmp ' SVNListParentPath On'
+ echo >>$@.tmp ' AuthType Basic'
+ echo >>$@.tmp ' AuthName "Subversion Repository"'
+ echo >>$@.tmp ' AuthUserFile $(HTTPD_CHECK_USERS)'
+ echo >>$@.tmp ' Require valid-user'
+ echo >>$@.tmp ' AuthzForceUsernameCase Lower'
+ifeq ($(USE_AUTHZ_SHORT_CIRCUIT),yes)
+ echo >>$@.tmp ' SVNPathAuthz short_circuit'
+endif
+ echo >>$@.tmp '</Location>'
+ echo >>$@.tmp '<Location /authz-test-work/authn-group>'
+ echo >>$@.tmp ' DAV svn'
+ echo >>$@.tmp ' SVNParentPath $(SVN_WC)/subversion/tests/cmdline/svn-test-work/local_tmp'
+ echo >>$@.tmp ' AuthzSVNAccessFile $(SVN_WC)/subversion/tests/cmdline/svn-test-work/authz'
+ifeq ($(USE_HTTPV1),yes)
+ echo >>$@.tmp ' SVNAdvertiseV2Protocol off'
+endif
+ echo >>$@.tmp ' SVNListParentPath On'
+ echo >>$@.tmp ' AuthType Basic'
+ echo >>$@.tmp ' AuthName "Subversion Repository"'
+ echo >>$@.tmp ' AuthUserFile $(HTTPD_CHECK_USERS)'
+ echo >>$@.tmp ' AuthGroupFile $(HTTPD_CHECK_GROUPS)'
+ echo >>$@.tmp ' Require group random'
+ echo >>$@.tmp ' AuthzSVNAuthoritative Off'
+ echo >>$@.tmp ' SVNPathAuthz On'
+ echo >>$@.tmp '</Location>'
+ echo >>$@.tmp '<IfModule mod_authz_core.c>'
+ echo >>$@.tmp ' <Location /authz-test-work/sallrany>'
+ echo >>$@.tmp ' DAV svn'
+	echo >>$@.tmp '  SVNParentPath $(SVN_WC)/subversion/tests/cmdline/svn-test-work/local_tmp'
+ echo >>$@.tmp ' AuthzSVNAccessFile $(SVN_WC)/subversion/tests/cmdline/svn-test-work/authz'
+ifeq ($(USE_HTTPV1),yes)
+ echo >>$@.tmp ' SVNAdvertiseV2Protocol off'
+endif
+ echo >>$@.tmp ' SVNListParentPath On'
+ echo >>$@.tmp ' AuthType Basic'
+ echo >>$@.tmp ' AuthName "Subversion Repository"'
+ echo >>$@.tmp ' AuthUserFile $(HTTPD_CHECK_USERS)'
+ echo >>$@.tmp ' AuthzSendForbiddenOnFailure On'
+ echo >>$@.tmp ' Satisfy All'
+ echo >>$@.tmp ' <RequireAny>'
+ echo >>$@.tmp ' Require valid-user'
+ echo >>$@.tmp ' Require expr req("ALLOW") == "1"'
+ echo >>$@.tmp ' </RequireAny>'
+ifeq ($(USE_AUTHZ_SHORT_CIRCUIT),yes)
+ echo >>$@.tmp ' SVNPathAuthz short_circuit'
+endif
+ echo >>$@.tmp ' </Location>'
+ echo >>$@.tmp ' <Location /authz-test-work/sallrall>'
+ echo >>$@.tmp ' DAV svn'
+ echo >>$@.tmp ' SVNParentPath $(SVN_WC)/subversion/tests/cmdline/svn-test-work/local_tmp'
+ echo >>$@.tmp ' AuthzSVNAccessFile $(SVN_WC)/subversion/tests/cmdline/svn-test-work/authz'
+ifeq ($(USE_HTTPV1),yes)
+ echo >>$@.tmp ' SVNAdvertiseV2Protocol off'
+endif
+ echo >>$@.tmp ' SVNListParentPath On'
+ echo >>$@.tmp ' AuthType Basic'
+ echo >>$@.tmp ' AuthName "Subversion Repository"'
+ echo >>$@.tmp ' AuthUserFile $(HTTPD_CHECK_USERS)'
+ echo >>$@.tmp ' AuthzSendForbiddenOnFailure On'
+ echo >>$@.tmp ' Satisfy All'
+ echo >>$@.tmp ' <RequireAll>'
+ echo >>$@.tmp ' Require valid-user'
+ echo >>$@.tmp ' Require expr req("ALLOW") == "1"'
+ echo >>$@.tmp ' </RequireAll>'
+ifeq ($(USE_AUTHZ_SHORT_CIRCUIT),yes)
+ echo >>$@.tmp ' SVNPathAuthz short_circuit'
+endif
+ echo >>$@.tmp ' </Location>'
+ echo >>$@.tmp '</IfModule>'
+ echo >>$@.tmp 'RedirectMatch permanent ^/svn-test-work/repositories/REDIRECT-PERM-(.*)$$ /svn-test-work/repositories/$$1'
+ echo >>$@.tmp 'RedirectMatch ^/svn-test-work/repositories/REDIRECT-TEMP-(.*)$$ /svn-test-work/repositories/$$1'
+ echo >>$@.tmp 'Include "conf/$(SVN_REL_WC)*-custom.conf"'
+ echo >> $@.tmp '#SVNInMemoryCacheSize 0'
+ echo >> $@.tmp '#SVNCacheTextDeltas Off'
+ echo >> $@.tmp '#SVNCacheRevProps Off'
+ mv -f $@.tmp $@
+
+$(HTTPD_PROXY_CONF): $(HTTPD_CHECK_CONF)
+ mkdir -p $(dir $@)
+ echo > $@.tmp '# httpd config for a write-through proxy'
+ echo >>$@.tmp 'ServerRoot "$(PREFIX)/httpd"'
+ echo >>$@.tmp 'Listen localhost:$(HTTPD_PROXY_PORT)'
+ echo >>$@.tmp 'LoadModule dav_svn_module $(MOD_DAV_SVN)'
+ echo >>$@.tmp 'LoadModule authz_svn_module $(MOD_AUTHZ_SVN)'
+ echo >>$@.tmp 'LoadModule dontdothat_module $(MOD_DONTDOTHAT)'
+ echo >>$@.tmp 'DocumentRoot "$(PREFIX)/httpd/htdocs"'
+ echo >>$@.tmp '# This Location lets you access repositories dropped in /tmp/svn-$(BRANCH)-proxy'
+ echo >>$@.tmp '<Location /svn>'
+ echo >>$@.tmp ' DAV svn'
+ echo >>$@.tmp ' SVNParentPath /tmp/svn-$(BRANCH)-proxy'
+ echo >>$@.tmp ' SVNMasterURI http://localhost:$(HTTPD_CHECK_PORT)/svn/'
+ echo >>$@.tmp ' Allow from all'
+ echo >>$@.tmp ' #AuthType Basic'
+ echo >>$@.tmp ' #AuthName "Subversion Repository"'
+ echo >>$@.tmp ' #AuthUserFile $(HTTPD_CHECK_USERS)'
+ echo >>$@.tmp ' #Require valid-user'
+ifeq ($(USE_HTTPV1),yes)
+ echo >> $@.tmp ' SVNAdvertiseV2Protocol off'
+endif
+ifeq ($(USE_AUTHZ_SHORT_CIRCUIT),yes)
+ echo >> $@.tmp ' SVNPathAuthz short_circuit'
+endif
+ echo >>$@.tmp '</Location>'
+ echo >>$@.tmp '# This Location allows repositories to be synced'
+ echo >>$@.tmp '<Location /svn-proxy-sync>'
+ echo >>$@.tmp 'DAV svn'
+ echo >>$@.tmp 'SVNParentPath /tmp/svn-$(BRANCH)-proxy'
+ echo >>$@.tmp 'Allow from all'
+ echo >>$@.tmp '</Location>'
+ mv -f $@.tmp $@
+
+.PHONY: libpath
+libpath:
+ @echo export LD_LIBRARY_PATH="$(LD_LIBRARY_PATH):$$LD_LIBRARY_PATH" \
+ "PYTHONPATH=$(SVN_PREFIX)/lib/svn-python"
+#
+# OpenBSD requires an LD_PRELOAD hack to dlopen() libraries linked to
+# libpthread (e.g. libsvn_auth_gnome_keyring.so) into executables that
+# aren't linked to libpthread.
+ifeq ($(UNAME),OpenBSD)
+LIB_PTHREAD_HACK=LD_PRELOAD=libpthread.so
+endif
+
+.PHONY: start-svnserve stop-svnserve start-httpd stop-httpd
+
+HTTPD_CMD = env LD_LIBRARY_PATH=$(LD_LIBRARY_PATH) $(LIB_PTHREAD_HACK) \
+ $(PREFIX)/httpd/bin/apachectl
+HTTPD_START_CMD = $(HTTPD_CMD) -f $(HTTPD_CHECK_CONF) -k start
+HTTPD_START_CMD_PROXY = $(HTTPD_CMD) -f $(HTTPD_PROXY_CONF)
+HTTPD_START_CMD_DEBUG = $(HTTPD_START_CMD) -X
+HTTPD_STOP_CMD = $(HTTPD_CMD) -f $(HTTPD_CHECK_CONF) -k stop; sleep 3
+
+SVNSERVE_START_CMD = (test -e $(PWD)/svnserve-*.pid && \
+ ls $(PWD)/svnserve-*.pid | while read pidfile; do \
+ kill `cat "$$pidfile"`; sleep 3; \
+ rm -f $$pidfile; \
+ done); \
+ $(SVN_PREFIX)/bin/svnserve \
+ --listen-host 127.0.0.1 \
+ --pid-file $(PWD)/svnserve-$(WC).pid \
+ -d -r $(svn_builddir)/subversion/tests/cmdline
+SVNSERVE_STOP_CMD = kill `cat $(PWD)/svnserve-$(WC).pid`; sleep 3; \
+ rm -f $(PWD)/svnserve-$(WC).pid
+
+start-httpd: $(HTTPD_CHECK_CONF)
+ $(HTTPD_START_CMD)
+ @echo "To run tests over http, run:"
+ @echo " make check BASE_URL=http://localhost:$(HTTPD_CHECK_PORT)"
+ @echo "The URL http://localhost:$(HTTPD_CHECK_PORT)/svn/"
+ @echo "lets you access repositories dropped into /tmp"
+
+start-httpd-debug: $(HTTPD_CHECK_CONF)
+ $(HTTPD_START_CMD_DEBUG) &
+ @echo "To run tests over http, run:"
+ @echo " make check BASE_URL=http://localhost:$(HTTPD_CHECK_PORT)"
+ @echo "The URL http://localhost:$(HTTPD_CHECK_PORT)/svn/"
+ @echo "lets you access repositories dropped into /tmp"
+ @echo "Trying to attach gdb to httpd..."
+ @sleep 1
+ gdb $(PREFIX)/httpd/bin/httpd `cat $(PREFIX)/httpd/logs/httpd.pid`
+
+start-httpd-proxy: $(HTTPD_PROXY_CONF)
+ $(HTTPD_START_CMD_PROXY)
+ @echo "The URL http://localhost:$(HTTPD_PROXY_PORT)/svn/"
+ @echo "lets you access repositories dropped into /tmp/svn-$(BRANCH)-proxy"
+
+stop-httpd: $(HTTPD_CHECK_CONF)
+ $(HTTPD_STOP_CMD)
+
+stop-httpd-proxy: $(HTTPD_PROXY_CONF)
+ pkill -f '$(PREFIX)/httpd/bin/httpd -f $(HTTPD_PROXY_CONF)'
+
+start-svnserve: $(SVN_OBJDIR)/.compiled
+ $(SVNSERVE_START_CMD)
+
+stop-svnserve:
+ $(SVNSERVE_STOP_CMD)
+
+define do_check
+-cd $(svn_builddir) && for fs in fsfs bdb; do \
+ echo "Begin test: $(subst svn-check-,,$@) x $$fs"; \
+ test -d "$(RAMDISK)/tmp" && export TMPDIR="$(RAMDISK)/tmp"; \
+ env LD_LIBRARY_PATH=$(LD_LIBRARY_PATH) $(LIB_PTHREAD_HACK) \
+ env MAKEFLAGS= make check PARALLEL=$(PARALLEL) CLEANUP=$(CLEANUP) \
+ EXCLUSIVE_WC_LOCKS=$(EXCLUSIVE_WC_LOCKS) \
+ MEMCACHED_SERVER=$(MEMCACHED_SERVER) $1 FS_TYPE=$$fs; \
+ for log in tests.log fails.log; do \
+ test -f $$log && mv -f $$log $$log.$@-$$fs; \
+ done; \
+done
+endef
+
+TEST_WORK=$(svn_builddir)/subversion/tests/cmdline/svn-test-work
+svn-check-prepare-ramdisk:
+ -rm -rf "$(TEST_WORK)"; \
+ if [ -d "$(RAMDISK)" ] && \
+ touch "$(RAMDISK)/$(SVN_REL_WC).writetest" && \
+ mkdir -p "$(RAMDISK)/$(SVN_REL_WC)"; then \
+ rm -f "$(RAMDISK)/$(SVN_REL_WC).writetest"; \
+ ln -s "$(RAMDISK)/$(SVN_REL_WC)" "$(TEST_WORK)"; \
+ mkdir -p "$(RAMDISK)/tmp"; \
+ fi
+
+ifndef NEON_FLAG
+svn-check-neon:
+ @echo Neon is not supported by this build of Subversion, skipping tests
+ @true
+else
+svn-check-neon: $(HTTPD_CHECK_CONF) $(SVN_OBJDIR)/.compiled $(SVN_OBJDIR)/.bindings-compiled svn-check-prepare-ramdisk
+ $(HTTPD_START_CMD)
+ $(call do_check,BASE_URL=http://localhost:$(HTTPD_CHECK_PORT) HTTP_LIBRARY=neon)
+ $(HTTPD_STOP_CMD)
+endif
+
+svn-check-serf: $(HTTPD_CHECK_CONF) $(SVN_OBJDIR)/.compiled $(SVN_OBJDIR)/.bindings-compiled svn-check-prepare-ramdisk
+ $(HTTPD_START_CMD)
+ $(call do_check,BASE_URL=http://localhost:$(HTTPD_CHECK_PORT) HTTP_LIBRARY=serf)
+ $(HTTPD_STOP_CMD)
+
+svn-check-local: svn-check-prepare-ramdisk
+ $(call do_check)
+
+svn-check-svn: svn-check-prepare-ramdisk
+ $(SVNSERVE_START_CMD)
+ $(call do_check,BASE_URL=svn://127.0.0.1)
+ $(SVNSERVE_STOP_CMD)
+
+.PHONY: svn-check-swig-pl svn-check-swig-py svn-check-swig-rb svn-check-javahl
+svn-check-bindings: svn-check-swig-pl svn-check-swig-py svn-check-swig-rb \
+ svn-check-javahl
+
+RUBYLIB=$(SVN_PREFIX)/lib/ruby/site_ruby$(shell grep \
+ ^svn_cv_ruby_sitedir_archsuffix $(svn_builddir)/config.log | \
+ cut -d'=' -f2):$(SVN_PREFIX)/lib/ruby/site_ruby$(shell \
+ grep ^svn_cv_ruby_sitedir_libsuffix $(svn_builddir)/config.log | \
+ cut -d'=' -f2)
+svn-check-swig-pl:
+ -if [ $(ENABLE_PERL_BINDINGS) = yes ]; then \
+ (cd $(svn_builddir) && \
+ env LD_LIBRARY_PATH=$(LD_LIBRARY_PATH) \
+ $(LIB_PTHREAD_HACK) \
+ env MAKEFLAGS= make check-swig-pl 2>&1) | \
+ tee $(svn_builddir)/tests.log.bindings.pl; \
+ fi
+
+svn-check-swig-py:
+ -(cd $(svn_builddir) && \
+ env LD_LIBRARY_PATH=$(LD_LIBRARY_PATH) \
+ env MAKEFLAGS= make check-swig-py 2>&1) | \
+ tee $(svn_builddir)/tests.log.bindings.py
+
+# We add the svn prefix to PATH here because the ruby tests
+# attempt to start an svnserve binary found in PATH.
+svn-check-swig-rb:
+ (cd $(svn_builddir) && \
+ env RUBYLIB=$(RUBYLIB) \
+ LD_LIBRARY_PATH=$(LD_LIBRARY_PATH) \
+ PATH=$(SVN_PREFIX)/bin:$$PATH \
+ $(LIB_PTHREAD_HACK) \
+ env MAKEFLAGS= make check-swig-rb 2>&1) | \
+ tee $(svn_builddir)/tests.log.bindings.rb
+
+svn-check-javahl:
+ -if [ $(ENABLE_JAVA_BINDINGS) = yes ]; then \
+ (cd $(svn_builddir) && \
+ env LD_LIBRARY_PATH=$(LD_LIBRARY_PATH) \
+ env MAKEFLAGS= make $(JAVAHL_CHECK_TARGET) 2>&1) | \
+ tee $(svn_builddir)/tests.log.bindings.javahl; \
+ fi
+
+svn-check: svn-check-prepare-ramdisk svn-check-local svn-check-svn \
+ svn-check-neon svn-check-serf svn-check-bindings
+
+.PHONY: sign-email
+ifdef NEON_FLAG
+NEON_STR=ra_neon |
+NEON_VER_LINE=@echo "neon: $(NEON_VER)"
+endif
+sign-email:
+ @echo "Summary: +1 to release"
+ @echo ""
+ @echo "Tested: [bdb | fsfs] x [ra_local | ra_svn | $(NEON_STR)ra_serf]"
+ @echo " swig bindings"
+ifeq ($(ENABLE_JAVA_BINDINGS),yes)
+ @echo " javahl bindings"
+endif
+ @echo ""
+ @echo "Test results: All passed."
+ @echo ""
+ @echo "Platform: `uname -r -s -m`"
+ @echo ""
+ @echo "Dependencies:"
+ @echo "bdb: $(BDB_VER)"
+ifeq ($(USE_APR_ICONV),yes)
+ @echo "apr-iconv: $(APR_ICONV_VER)"
+else
+ @echo "GNU-iconv: $(GNU_ICONV_VER)"
+endif
+ @echo "apr: $(APR_VER)"
+ @echo "apr-util: $(APR_UTIL_VER)"
+ @echo "httpd: $(HTTPD_VER)"
+ $(NEON_VER_LINE)
+ @echo "serf: $(SERF_VER)"
+ @echo "cyrus-sasl: $(CYRUS_SASL_VER)"
+ @echo "sqlite: $(SQLITE_VER)"
+ifdef LZ4_FLAG
+ @echo "lz4: $(LZ4_VER)"
+endif
+ @echo "libssl: `openssl version`"
+ @echo "swig: `swig -version | grep Version | cut -d' ' -f3`"
+ @echo "python: $(PYTHON_VER)"
+ @echo "perl: `eval \`perl -V:version\`; echo $$version`"
+ @echo "ruby: $(RUBY_VER)"
+ifeq ($(ENABLE_JAVA_BINDINGS),yes)
+ @echo "java: `java -version 2>&1 | grep version | cut -d' ' -f3 | sed -e 's/\"//g'`"
+endif
+ @echo ""
+ @echo "Signatures:"
+ @echo
+ @echo "subversion-$(TAG).tar.gz"
+ @echo "`cat subversion-$(TAG).tar.gz.asc`"
+ @echo
+ @echo "subversion-$(TAG).tar.bz2"
+ @echo "`cat subversion-$(TAG).tar.bz2.asc`"
diff --git a/tools/dev/unix-build/README b/tools/dev/unix-build/README
new file mode 100644
index 0000000..13cdc42
--- /dev/null
+++ b/tools/dev/unix-build/README
@@ -0,0 +1,96 @@
+Introduction
+============
+Makefile.svn helps Subversion developers on unix-like systems set up an
+SVN development environment without requiring root privileges. It does
+this by fetching Subversion along with many of its dependencies from
+the internet and building them using sane defaults suitable for
+development (for example, it invokes --enable-maintainer-mode while
+compiling Subversion itself). However, indirect dependencies are not
+covered; you need OpenSSL installed to get SSL support in neon and
+serf for example. Also, it doesn't build all the bindings by default
+(javahl for example).
+
+This README only covers basic usage. Please read Makefile.svn for more
+details.
+
+Requirements
+============
+In addition to the usual GNU buildtools including a sane compiler and
+GNU autotools, some version of Subversion is required to be in
+$PATH. It is used to fetch the desired version of Subversion from the
+repository.
+
+Usage
+=====
+First, choose a directory $(SVN_DEV) to set up the environment.
+For example, $(SVN_DEV) could be the directory "~/svn".
+Note that this directory cannot be changed later because the script
+hardcodes build and link paths relative to the current working directory.
+
+ $ mkdir $(SVN_DEV)
+
+Now change into this directory and make the Makefile available in it:
+
+ $ cd $(SVN_DEV)
+ $ svn checkout https://svn.apache.org/repos/asf/subversion/trunk/tools/dev/unix-build
+ $ ln -s unix-build/Makefile.svn Makefile
+
+To fetch and build trunk, simply don't pass anything, just run 'make':
+
+ $ cd $(SVN_DEV)
+ $ make
+
+Pass the branch you want to build in BRANCH, e.g.
+ $ make BRANCH="1.5.x"
+You can also pass a tag to build:
+ $ make TAG="1.6.6"
+And you can specify a working copy to use, in case you need more
+than one working copy of the same branch:
+ $ make BRANCH="1.6.x" WC="1.6.x-test2"
+
+When the script has finished fetching and building, it uses
+$(SVN_DEV)/prefix to install Subversion libraries and
+binaries. $(SVN_DEV)/prefix/svn-trunk (or whatever you choose to
+build) will contain the latest Subversion binaries. You can add
+$(SVN_DEV)/prefix/svn-trunk/bin to your $PATH to use them:
+
+ $ export PATH="$(SVN_DEV)/prefix/svn-trunk/bin:$PATH"
+
+The Makefile in $(SVN_DEV)/svn-trunk is configured to build with sane
+options: while developing Subversion, simply `svn up` to pull the
+latest changes, `make` and `make install` to install the binaries in
+$(SVN_DEV)/prefix/svn-trunk. This usually works fine. If not, you may
+need to use the 'svn-reset' target and recompile everything.
+
+If at any point, you want to recompile any of the packages with the
+default configuration in Makefile.svn, use the *-clean and *-reset
+target in Makefile.svn before trying to rebuild again. For example:
+
+ $ make svn-clean
+ $ make svn-reset
+ $ make
+
+Or, if you want to recompile svn and all dependencies:
+
+ $ make clean
+ $ make reset
+ $ make
+
+If you want to remove everything including the installed binaries effectively
+returning to the starting point, use the "nuke" target (BE CAREFUL, this will
+remove the 'svn' binary compiled from trunk which you might need to manage
+existing working copies):
+
+ $ make nuke
+
+Extended usage
+==============
+The script can also run Subversion's regression test suite via all
+repository backends and RA methods. It generates the necessary
+configuration files and starts svnserve and httpd daemons
+automatically on non-privileged ports. The default test target to test
+everything is "svn-check".
+
+Notes
+=====
+The script currently doesn't build Ctypes Python bindings.
diff --git a/tools/dev/verify-history.py b/tools/dev/verify-history.py
new file mode 100755
index 0000000..a408cc7
--- /dev/null
+++ b/tools/dev/verify-history.py
@@ -0,0 +1,97 @@
+#!/usr/bin/env python
+#
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements. See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership. The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied. See the License for the
+# specific language governing permissions and limitations
+# under the License.
+#
+#
+
+# This program is used to verify the FS history code.
+#
+# The basic gist is this: given a repository, a path in that
+# repository, and a revision at which to begin plowing through history
+# (towards revision 1), verify that each history object returned by
+# the svn_fs_history_prev() interface -- indirectly via
+# svn_repos_history() -- represents a revision in which the node being
+# tracked actually changed, or where a parent directory of the node
+# was copied, according to the list of paths changed as reported by
+# svn_fs_paths_changed().
+#
+# A fun way to run this:
+#
+# #!/bin/sh
+#
+# export VERIFY=/path/to/verify-history.py
+# export MYREPOS=/path/to/repos
+#
+# # List the paths in HEAD of the repos (filtering out the directories)
+# for VCFILE in `svn ls -R file://${MYREPOS} | grep -v '/$'`; do
+# echo "Checking ${VCFILE}"
+# ${VERIFY} ${MYREPOS} ${VCFILE}
+# done
+
+import sys
+import string
+from svn import core, repos, fs
+
+class HistoryChecker:
+ def __init__(self, fs_ptr):
+ self.fs_ptr = fs_ptr
+
+ def _check_history(self, path, revision):
+ root = fs.revision_root(self.fs_ptr, revision)
+ changes = fs.paths_changed(root)
+ while True:
+ if path in changes:
+ return 1
+ if path == '/':
+ return 0
+ idx = path.rfind('/')
+ if idx != -1:
+ path = path[:idx]
+ else:
+ return 0
+
+ def add_history(self, path, revision, pool=None):
+ if not self._check_history(path, revision):
+ print("**WRONG** %8d %s" % (revision, path))
+ else:
+ print(" %8d %s" % (revision, path))
+
+
+def check_history(fs_ptr, path, revision):
+ history = HistoryChecker(fs_ptr)
+ repos.history(fs_ptr, path, history.add_history, 1, revision, 1)
+
+
+def main():
+ argc = len(sys.argv)
+ if argc < 3 or argc > 4:
+ print("Usage: %s PATH-TO-REPOS PATH-IN-REPOS [REVISION]" % sys.argv[0])
+ sys.exit(1)
+
+ fs_ptr = repos.fs(repos.open(sys.argv[1]))
+ if argc == 3:
+ revision = fs.youngest_rev(fs_ptr)
+ else:
+ revision = int(sys.argv[3])
+ check_history(fs_ptr, sys.argv[2], revision)
+ sys.exit(0)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/tools/dev/warn-ignored-err.sh b/tools/dev/warn-ignored-err.sh
new file mode 100755
index 0000000..2e4a106
--- /dev/null
+++ b/tools/dev/warn-ignored-err.sh
@@ -0,0 +1,83 @@
+#!/bin/sh
+#
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements. See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership. The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied. See the License for the
+# specific language governing permissions and limitations
+# under the License.
+#
+#
+HELP="\
+Usage: $0 [--remove] [FILE...]
+
+Insert or remove the GCC attribute \"warn_unused_result\" on each function
+that returns a Subversion error, in the specified files or, by default,
+*.h and *.c in the ./subversion and ./tools trees.
+"
+
+LC_ALL=C
+
+# Parse options
+REMOVE=
+case "$1" in
+--remove) REMOVE=1; shift;;
+--help) echo "$HELP"; exit 0;;
+--*) echo "$0: unknown option \"$1\"; try \"--help\""; exit 1;;
+esac
+
+# Set the positional parameters to the default files if none specified
+if [ $# = 0 ]; then
+ set -- `find subversion/ tools/ -name '*.[ch]'`
+fi
+
+# A line that declares a function return type of "svn_error_t *" looks like:
+# - Possibly leading whitespace, though not often.
+# - Possibly "static" or "typedef".
+# - The return type "svn_error_t *".
+# - Possibly a function or pointer-to-function declarator:
+# - "identifier"
+# - "(identifier)" (used in some typedefs)
+# - "(*identifier)"
+# with either nothing more, or a "(" next (especially not "," or ";" or "="
+# which all indicate a variable rather than a function).
+
+# Regular expressions for "sed"
+# Note: take care in matching back-reference numbers to parentheses
+PREFIX="^\( *\| *static *\| *typedef *\)"
+RET_TYPE="\(svn_error_t *\* *\)"
+IDENT="[a-zA-Z_][a-zA-Z0-9_]*"
+DECLR="\($IDENT\|( *\(\*\|\) *$IDENT *)\)"
+SUFFIX="\($DECLR *\((.*\|\)\|\)$"
+
+# The attribute string to be inserted or removed
+ATTRIB_RE="__attribute__((warn_unused_result))" # regex version of it
+ATTRIB_STR="__attribute__((warn_unused_result))" # plain text version of it
+
+if [ $REMOVE ]; then
+ SUBST="s/$PREFIX$ATTRIB_RE $RET_TYPE$SUFFIX/\1\2\3/"
+else
+ SUBST="s/$PREFIX$RET_TYPE$SUFFIX/\1$ATTRIB_STR \2\3/"
+fi
+
+for F do
+ # Edit the file, leaving a backup suffixed with a tilde
+ { sed -e "$SUBST" "$F" > "$F~1" &&
+ { ! cmp -s "$F" "$F~1"; } &&
+ mv "$F" "$F~" && # F is briefly absent now; a copy could avoid this
+ mv "$F~1" "$F"
+ } ||
+ # If anything went wrong or no change was made, remove the temporary file
+ rm "$F~1"
+done
diff --git a/tools/dev/wc-format.py b/tools/dev/wc-format.py
new file mode 100755
index 0000000..3ecfad0
--- /dev/null
+++ b/tools/dev/wc-format.py
@@ -0,0 +1,64 @@
+#!/usr/bin/env python
+
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements. See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership. The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied. See the License for the
+# specific language governing permissions and limitations
+# under the License.
+
+import os
+import sqlite3
+import sys
+
+MIN_SINGLE_DB_FORMAT = 19
+
+def get_format(wc_path):
+ entries = os.path.join(wc_path, '.svn', 'entries')
+ wc_db = os.path.join(wc_path, '.svn', 'wc.db')
+
+ formatno = 'not under version control'
+
+ if os.path.exists(wc_db):
+ conn = sqlite3.connect(wc_db)
+ curs = conn.cursor()
+ curs.execute('pragma user_version;')
+ formatno = curs.fetchone()[0]
+ elif os.path.exists(entries):
+ formatno = int(open(entries).readline())
+ elif os.path.exists(wc_path):
+ parent_path = os.path.dirname(os.path.abspath(wc_path))
+ if wc_path != parent_path:
+ formatno = get_format(parent_path)
+ if formatno >= MIN_SINGLE_DB_FORMAT:
+ return formatno
+
+ return formatno
+
+def print_format(wc_path):
+ # see subversion/libsvn_wc/wc.h for format values and information
+ # 1.0.x -> 1.3.x: format 4
+ # 1.4.x: format 8
+ # 1.5.x: format 9
+ # 1.6.x: format 10
+ # 1.7.x: format 29
+ formatno = get_format(wc_path)
+ print('%s: %s' % (wc_path, formatno))
+
+
+if __name__ == '__main__':
+ paths = sys.argv[1:]
+ if not paths:
+ paths = ['.']
+ for wc_path in paths:
+ print_format(wc_path)
diff --git a/tools/dev/wc-ng/bump-to-19.py b/tools/dev/wc-ng/bump-to-19.py
new file mode 100755
index 0000000..6f17a1b
--- /dev/null
+++ b/tools/dev/wc-ng/bump-to-19.py
@@ -0,0 +1,357 @@
+#!/usr/bin/env python
+
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements. See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership. The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied. See the License for the
+# specific language governing permissions and limitations
+# under the License.
+
+"""This program converts a Subversion WC from 1.7-dev format 18 to
+ 1.7-dev format 19 by migrating data from multiple DBs to a single DB.
+
+ Usage: bump-to-19.py WC_ROOT_DIR
+ where WC_ROOT_DIR is the path to the WC root directory.
+
+ Skips non-WC dirs and WC dirs that are not at format 18."""
+
+# TODO: Detect '_svn' as an alternative to '.svn'.
+
+# TODO: Probably should remove any directory that is in state to-be-deleted
+# and doesn't have its 'keep_local' flag set. Otherwise it will
+# become unversioned after commit, whereas format-18 and earlier would
+# have deleted it after commit. Before deleting we should check there
+# are no unversioned things inside, and maybe even check for "local
+# mods" even though that's logically impossible. On the other hand
+# it's not a big deal for the user to clean these up manually.
+
+
+import sys, os, shutil, sqlite3
+
+dot_svn = '.svn'
+
+def dotsvn_path(wc_path):
+ return os.path.join(wc_path, dot_svn)
+
+def db_path(wc_path):
+ return os.path.join(wc_path, dot_svn, 'wc.db')
+
+def pristine_path(wc_path):
+ return os.path.join(wc_path, dot_svn, 'pristine')
+
+def tmp_path(wc_path):
+ return os.path.join(wc_path, dot_svn, 'tmp')
+
+class NotASubversionWC(Exception):
+ def __init__(self, wc_path):
+ self.wc_path = wc_path
+ def __str__(self):
+ return "not a Subversion WC: '" + self.wc_path + "'"
+
+class WrongFormatException(Exception):
+ def __init__(self, wc_dir, format):
+ self.wc_dir = wc_dir
+ self.format = format
+ def __str__(self):
+ return "format is " + str(self.format) + " not 18: '" + self.wc_dir + "'"
+
+
+
+STMT_COPY_BASE_NODE_TABLE_TO_WCROOT_DB1 = \
+ "INSERT OR REPLACE INTO root.BASE_NODE ( " \
+ " wc_id, local_relpath, repos_id, repos_relpath, parent_relpath, " \
+ " presence, kind, revnum, checksum, translated_size, changed_rev, " \
+ " changed_date, changed_author, depth, symlink_target, last_mod_time, " \
+ " properties, dav_cache, incomplete_children, file_external ) " \
+ "SELECT wc_id, ?1, repos_id, repos_relpath, ?2 AS parent_relpath, " \
+ " presence, kind, revnum, checksum, translated_size, changed_rev, " \
+ " changed_date, changed_author, depth, symlink_target, last_mod_time, " \
+ " properties, dav_cache, incomplete_children, file_external " \
+ "FROM BASE_NODE WHERE local_relpath = ''; "
+
+STMT_COPY_BASE_NODE_TABLE_TO_WCROOT_DB2 = \
+ "INSERT INTO root.BASE_NODE ( " \
+ " wc_id, local_relpath, repos_id, repos_relpath, parent_relpath, " \
+ " presence, kind, revnum, checksum, translated_size, changed_rev, " \
+ " changed_date, changed_author, depth, symlink_target, last_mod_time, " \
+ " properties, dav_cache, incomplete_children, file_external ) " \
+ "SELECT wc_id, ?1 || '/' || local_relpath, repos_id, repos_relpath, " \
+ " ?1 AS parent_relpath, " \
+ " presence, kind, revnum, checksum, translated_size, changed_rev, " \
+ " changed_date, changed_author, depth, symlink_target, last_mod_time, " \
+ " properties, dav_cache, incomplete_children, file_external " \
+ "FROM BASE_NODE WHERE local_relpath != ''; "
+
+STMT_COPY_WORKING_NODE_TABLE_TO_WCROOT_DB1 = \
+ "INSERT OR REPLACE INTO root.WORKING_NODE ( " \
+ " wc_id, local_relpath, parent_relpath, presence, kind, checksum, " \
+ " translated_size, changed_rev, changed_date, changed_author, depth, " \
+ " symlink_target, copyfrom_repos_id, copyfrom_repos_path, copyfrom_revnum, " \
+ " moved_here, moved_to, last_mod_time, properties, keep_local ) " \
+ "SELECT wc_id, ?1, ?2 AS parent_relpath, " \
+ " presence, kind, checksum, " \
+ " translated_size, changed_rev, changed_date, changed_author, depth, " \
+ " symlink_target, copyfrom_repos_id, copyfrom_repos_path, copyfrom_revnum, " \
+ " moved_here, moved_to, last_mod_time, properties, keep_local " \
+ "FROM WORKING_NODE WHERE local_relpath = ''; "
+
+STMT_COPY_WORKING_NODE_TABLE_TO_WCROOT_DB2 = \
+ "INSERT INTO root.WORKING_NODE ( " \
+ " wc_id, local_relpath, parent_relpath, presence, kind, checksum, " \
+ " translated_size, changed_rev, changed_date, changed_author, depth, " \
+ " symlink_target, copyfrom_repos_id, copyfrom_repos_path, copyfrom_revnum, " \
+ " moved_here, moved_to, last_mod_time, properties, keep_local ) " \
+ "SELECT wc_id, ?1 || '/' || local_relpath, ?1 AS parent_relpath, " \
+ " presence, kind, checksum, " \
+ " translated_size, changed_rev, changed_date, changed_author, depth, " \
+ " symlink_target, copyfrom_repos_id, copyfrom_repos_path, copyfrom_revnum, " \
+ " moved_here, moved_to, last_mod_time, properties, keep_local " \
+ "FROM WORKING_NODE WHERE local_relpath != ''; "
+
+STMT_COPY_ACTUAL_NODE_TABLE_TO_WCROOT_DB1 = \
+ "INSERT OR REPLACE INTO root.ACTUAL_NODE ( " \
+ " wc_id, local_relpath, parent_relpath, properties, " \
+ " conflict_old, conflict_new, conflict_working, " \
+ " prop_reject, changelist, text_mod, tree_conflict_data, " \
+ " conflict_data, older_checksum, left_checksum, right_checksum ) " \
+ "SELECT wc_id, ?1, ?2 AS parent_relpath, properties, " \
+ " conflict_old, conflict_new, conflict_working, " \
+ " prop_reject, changelist, text_mod, tree_conflict_data, " \
+ " conflict_data, older_checksum, left_checksum, right_checksum " \
+ "FROM ACTUAL_NODE WHERE local_relpath = ''; "
+
+STMT_COPY_ACTUAL_NODE_TABLE_TO_WCROOT_DB2 = \
+ "INSERT INTO root.ACTUAL_NODE ( " \
+ " wc_id, local_relpath, parent_relpath, properties, " \
+ " conflict_old, conflict_new, conflict_working, " \
+ " prop_reject, changelist, text_mod, tree_conflict_data, " \
+ " conflict_data, older_checksum, left_checksum, right_checksum ) " \
+ "SELECT wc_id, ?1 || '/' || local_relpath, ?1 AS parent_relpath, properties, " \
+ " conflict_old, conflict_new, conflict_working, " \
+ " prop_reject, changelist, text_mod, tree_conflict_data, " \
+ " conflict_data, older_checksum, left_checksum, right_checksum " \
+ "FROM ACTUAL_NODE WHERE local_relpath != ''; "
+
+STMT_COPY_LOCK_TABLE_TO_WCROOT_DB = \
+ "INSERT INTO root.LOCK " \
+ "SELECT * FROM LOCK; "
+
+STMT_COPY_PRISTINE_TABLE_TO_WCROOT_DB = \
+ "INSERT OR REPLACE INTO root.PRISTINE " \
+ "SELECT * FROM PRISTINE; "
+
+STMT_SELECT_SUBDIR = \
+ "SELECT 1 FROM BASE_NODE WHERE local_relpath=?1 AND kind='subdir'" \
+ "UNION " \
+ "SELECT 0 FROM WORKING_NODE WHERE local_relpath=?1 AND kind='subdir';"
+
+def copy_db_rows_to_wcroot(wc_subdir_relpath):
+ """Copy all relevant table rows from the $PWD/WC_SUBDIR_RELPATH/.svn/wc.db
+ into $PWD/.svn/wc.db."""
+
+ wc_root_path = ''
+ wc_subdir_path = wc_subdir_relpath
+ wc_subdir_parent_relpath = os.path.dirname(wc_subdir_relpath)
+
+ try:
+ db = sqlite3.connect(db_path(wc_subdir_path))
+  except sqlite3.OperationalError:
+ raise NotASubversionWC(wc_subdir_path)
+ c = db.cursor()
+
+ c.execute("ATTACH '" + db_path(wc_root_path) + "' AS 'root'")
+
+ ### TODO: the REPOSITORY table. At present we assume there is only one
+ # repository in use and its repos_id is consistent throughout the WC.
+ # That's not always true - e.g. "svn switch --relocate" creates repos_id
+ # 2, and then "svn mkdir" uses repos_id 1 in the subdirectory. */
+
+ c.execute(STMT_COPY_BASE_NODE_TABLE_TO_WCROOT_DB1,
+ (wc_subdir_relpath, wc_subdir_parent_relpath))
+ c.execute(STMT_COPY_BASE_NODE_TABLE_TO_WCROOT_DB2,
+ (wc_subdir_relpath, ))
+ c.execute(STMT_COPY_WORKING_NODE_TABLE_TO_WCROOT_DB1,
+ (wc_subdir_relpath, wc_subdir_parent_relpath))
+ c.execute(STMT_COPY_WORKING_NODE_TABLE_TO_WCROOT_DB2,
+ (wc_subdir_relpath, ))
+ c.execute(STMT_COPY_ACTUAL_NODE_TABLE_TO_WCROOT_DB1,
+ (wc_subdir_relpath, wc_subdir_parent_relpath))
+ c.execute(STMT_COPY_ACTUAL_NODE_TABLE_TO_WCROOT_DB2,
+ (wc_subdir_relpath, ))
+ c.execute(STMT_COPY_LOCK_TABLE_TO_WCROOT_DB)
+ c.execute(STMT_COPY_PRISTINE_TABLE_TO_WCROOT_DB)
+
+ db.commit()
+ db.close()
+
+
+def move_and_shard_pristine_files(old_wc_path, new_wc_path):
+ """Move all pristine text files from 'OLD_WC_PATH/.svn/pristine/'
+ into 'NEW_WC_PATH/.svn/pristine/??/', creating shard dirs where
+ necessary."""
+
+ old_pristine_dir = pristine_path(old_wc_path)
+ new_pristine_dir = pristine_path(new_wc_path)
+
+ if not os.path.exists(old_pristine_dir):
+ # That's fine, assuming there are no pristine texts.
+ return
+
+ for basename in os.listdir(old_pristine_dir):
+ shard = basename[:2]
+ if shard == basename: # already converted
+ continue
+ old = os.path.join(old_pristine_dir, basename)
+ new = os.path.join(new_pristine_dir, shard, basename)
+ os.renames(old, new)
+
+def select_subdir(wc_subdir_path):
+ """ Return True if wc_subdir_path is a known to be a versioned subdir,
+ False otherwise."""
+
+ try:
+ db = sqlite3.connect(db_path(''))
+ except:
+ raise NotASubversionWC(wc_subdir_path)
+ c = db.cursor()
+ c.execute(STMT_SELECT_SUBDIR, (wc_subdir_path,))
+ if c.fetchone() is None:
+ return False
+ else:
+ return True
+
+
+def migrate_wc_subdirs(wc_root_path):
+ """Move Subversion metadata from the admin dir of each subdirectory
+ below WC_ROOT_PATH into WC_ROOT_PATH's own admin dir."""
+
+ old_cwd = os.getcwd()
+ os.chdir(wc_root_path)
+
+ # Keep track of which dirs we've migrated so we can delete their .svn's
+ # afterwards. Done this way because the tree walking is top-down and if
+ # we deleted the .svn before walking into the subdir, it would look like
+ # an unversioned subdir.
+ migrated_subdirs = []
+
+ # For each directory in the WC, try to migrate each of its subdirs (DIRS).
+ # Done this way because (a) os.walk() gives us lists of subdirs, and (b)
+ # it's easy to skip the WC root dir.
+ for dir_path, dirs, files in os.walk('.'):
+
+ # don't walk into the '.svn' subdirectory
+ try:
+ dirs.remove(dot_svn)
+ except ValueError:
+ # a non-WC dir: don't walk into any subdirectories
+      print("skipped:", NotASubversionWC(dir_path))
+ del dirs[:]
+ continue
+
+ # Try to migrate each other subdirectory
+ for dir in dirs[:]: # copy so we can remove some
+ wc_subdir_path = os.path.join(dir_path, dir)
+ if wc_subdir_path.startswith('./'):
+ wc_subdir_path = wc_subdir_path[2:]
+
+ if not select_subdir(wc_subdir_path):
+ print("skipped:", wc_subdir_path)
+ dirs.remove(dir)
+ continue
+
+ try:
+ check_wc_format_number(wc_subdir_path)
+ print("migrating '" + wc_subdir_path + "'")
+ copy_db_rows_to_wcroot(wc_subdir_path)
+ move_and_shard_pristine_files(wc_subdir_path, '.')
+ migrated_subdirs += [wc_subdir_path]
+ except (WrongFormatException, NotASubversionWC) as e:
+ print("skipped:", e)
+ # don't walk into it
+ dirs.remove(dir)
+ continue
+
+ # Delete the remaining parts of the migrated .svn dirs
+ # Make a note of any problems in deleting.
+ failed_delete_subdirs = []
+ for wc_subdir_path in migrated_subdirs:
+ print("deleting " + dotsvn_path(wc_subdir_path))
+ try:
+ os.remove(db_path(wc_subdir_path))
+ if os.path.exists(pristine_path(wc_subdir_path)):
+ os.rmdir(pristine_path(wc_subdir_path))
+ shutil.rmtree(tmp_path(wc_subdir_path))
+ os.rmdir(dotsvn_path(wc_subdir_path))
+ except Exception as e:
+ print(e)
+ failed_delete_subdirs += [wc_subdir_path]
+
+ # Notify any problems in deleting
+ if failed_delete_subdirs:
+ print("Failed to delete the following directories. Please delete them manually.")
+ for wc_subdir_path in failed_delete_subdirs:
+ print(" " + dotsvn_path(wc_subdir_path))
+
+ os.chdir(old_cwd)
+
+
+def check_wc_format_number(wc_path):
+ """Check that the WC format of the WC dir WC_PATH is 18.
+ Raise a WrongFormatException if not."""
+
+ try:
+ db = sqlite3.connect(db_path(wc_path))
+ except sqlite3.OperationalError:
+ raise NotASubversionWC(wc_path)
+ c = db.cursor()
+ c.execute("PRAGMA user_version;")
+ format = c.fetchone()[0]
+ db.commit()
+ db.close()
+
+ if format != 18:
+ raise WrongFormatException(wc_path, format)
+
+
+def bump_wc_format_number(wc_path):
+ """Bump the WC format number of the WC dir WC_PATH to 19."""
+
+ try:
+ db = sqlite3.connect(db_path(wc_path))
+ except sqlite3.OperationalError:
+ raise NotASubversionWC(wc_path)
+ c = db.cursor()
+ c.execute("PRAGMA user_version = 19;")
+ db.commit()
+ db.close()
+
+
+if __name__ == '__main__':
+
+ if len(sys.argv) != 2:
+ print(__doc__)
+ sys.exit(1)
+
+ wc_root_path = sys.argv[1]
+
+ try:
+ check_wc_format_number(wc_root_path)
+ except (WrongFormatException, NotASubversionWC) as e:
+ print("error:", e)
+ sys.exit(1)
+
+ print("merging subdir DBs into single DB '" + wc_root_path + "'")
+ move_and_shard_pristine_files(wc_root_path, wc_root_path)
+ migrate_wc_subdirs(wc_root_path)
+ bump_wc_format_number(wc_root_path)
+
diff --git a/tools/dev/wc-ng/count-progress.py b/tools/dev/wc-ng/count-progress.py
new file mode 100755
index 0000000..bf06512
--- /dev/null
+++ b/tools/dev/wc-ng/count-progress.py
@@ -0,0 +1,117 @@
+#!/usr/bin/env python
+#
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements. See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership. The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied. See the License for the
+# specific language governing permissions and limitations
+# under the License.
+#
+#
+
+import os, sys
+
+SKIP = ['deprecated.c',
+ 'entries.c',
+ 'entries.h',
+ 'old-and-busted.c']
+
+TERMS = ['svn_wc_adm_access_t',
+ 'svn_wc_entry_t',
+ 'svn_wc__node_',
+ 'svn_wc__db_temp_',
+ 'svn_wc__db_node_hidden',
+ 'svn_wc__loggy',
+ 'svn_wc__db_wq_add',
+ ]
+
+
+def get_files_in(path):
+ names = os.listdir(path)
+ for skip in SKIP:
+ try:
+ names.remove(skip)
+ except ValueError:
+ pass
+ return [os.path.join(path, fname) for fname in names
+ if fname.endswith('.c') or fname.endswith('.h')]
+
+
+def count_terms_in(path):
+ files = get_files_in(path)
+ counts = {}
+ for term in TERMS:
+ counts[term] = 0
+  for filepath in files:
+ contents = open(filepath).read()
+ for term in TERMS:
+ counts[term] += contents.count(term)
+ return counts
+
+
+def print_report(wcroot):
+ client = count_terms_in(os.path.join(wcroot, 'subversion', 'libsvn_client'))
+ wc = count_terms_in(os.path.join(wcroot, 'subversion', 'libsvn_wc'))
+
+ client_total = 0
+ wc_total = 0
+
+ FMT = '%22s |%14s |%10s |%6s'
+ SEP = '%s+%s+%s+%s' % (23*'-', 15*'-', 11*'-', 7*'-')
+
+ print(FMT % ('', 'libsvn_client', 'libsvn_wc', 'Total'))
+ print(SEP)
+ for term in TERMS:
+ print(FMT % (term, client[term], wc[term], client[term] + wc[term]))
+ client_total += client[term]
+ wc_total += wc[term]
+ print(SEP)
+ print(FMT % ('Total', client_total, wc_total, client_total + wc_total))
+
+
+def usage():
+ print("""\
+Usage: %s [WCROOT]
+ %s --help
+
+Show statistics related to outstanding WC-NG code conversion work
+items in working copy branch root WCROOT. If WCROOT is omitted, this
+program will attempt to guess it using the assumption that it is being
+run from within the working copy of interest."""
+% (sys.argv[0], sys.argv[0]))
+
+ sys.exit(0)
+
+
+if __name__ == '__main__':
+ if len(sys.argv) > 1:
+ if '--help' in sys.argv[1:]:
+ usage()
+
+ print_report(sys.argv[1])
+ else:
+ cwd = os.path.abspath(os.getcwd())
+ idx = cwd.rfind(os.sep + 'subversion')
+ if idx > 0:
+ wcroot = cwd[:idx]
+ else:
+ idx = cwd.rfind(os.sep + 'tools')
+ if idx > 0:
+ wcroot = cwd[:idx]
+ elif os.path.exists(os.path.join(cwd, 'subversion')):
+ wcroot = cwd
+ else:
+ print("ERROR: the root of 'trunk' cannot be located -- please provide")
+ sys.exit(1)
+ print_report(wcroot)
diff --git a/tools/dev/wc-ng/gather-data.sh b/tools/dev/wc-ng/gather-data.sh
new file mode 100755
index 0000000..fe481b9
--- /dev/null
+++ b/tools/dev/wc-ng/gather-data.sh
@@ -0,0 +1,78 @@
+#!/usr/bin/env sh
+#
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements. See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership. The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied. See the License for the
+# specific language governing permissions and limitations
+# under the License.
+#
+#
+# Trap Ctrl-C
+trap 'exit 1' 2
+
+# Some useful variables
+REPOS=file:///home/hwright/dev/test/svn-mirror
+WC=blech
+REV_LIST=revs_list
+SCRIPT=count-progress.py
+DATA=data.csv
+
+# Sync up the local repo
+svnsync sync $REPOS
+
+# Grab the list of revisions of interest on trunk
+svn log -q -r0:HEAD $REPOS/trunk \
+ | grep -v '^----' \
+ | cut -f1 -d '|' \
+ | cut -b2- > $REV_LIST
+
+# Export the counting script
+if [ -e $SCRIPT ]; then
+ rm $SCRIPT
+fi
+svn export $REPOS/trunk/tools/dev/wc-ng/$SCRIPT $SCRIPT
+
+# Checkout a working copy
+if [ ! -d "$WC" ]; then
+ svn co $REPOS/trunk $WC -r1
+fi
+
+# Get all the symbols of interest from the counting script and write
+# them out at the headers in our csv file
+LINE=""
+for l in `./$SCRIPT $WC | tail -n +3 | grep -v '^----' | cut -f 1 -d '|'`; do
+ LINE="$LINE,$l"
+done
+echo "Revision$LINE" > $DATA
+
+# Iterate over all the revisions of interest
+export SVN_I_LOVE_CORRUPTED_WORKING_COPIES_SO_DISABLE_SLEEP_FOR_TIMESTAMPS='yes'
+for r in `cat $REV_LIST`; do
+ svn up -r$r $WC -q
+
+ # Do the count for that rev, and put the data in our data file
+ LINE=""
+ for l in `./$SCRIPT $WC | tail -n +3 | grep -v '^----' | cut -f 4 -d '|'`; do
+ LINE="$LINE,$l"
+ done
+ echo "$r$LINE" >> $DATA
+
+ echo "Done with revision $r"
+done
+unset SVN_I_LOVE_CORRUPTED_WORKING_COPIES_SO_DISABLE_SLEEP_FOR_TIMESTAMPS
+
+# Cleanup
+rm -rf $WC
+rm $REV_LIST
diff --git a/tools/dev/wc-ng/graph-data.py b/tools/dev/wc-ng/graph-data.py
new file mode 100755
index 0000000..a8f0c5e
--- /dev/null
+++ b/tools/dev/wc-ng/graph-data.py
@@ -0,0 +1,70 @@
+#!/usr/bin/env python
+#
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements. See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership. The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied. See the License for the
+# specific language governing permissions and limitations
+# under the License.
+#
+#
+
+import matplotlib.mlab as mlab
+import matplotlib.pyplot as plt
+from matplotlib import pylab
+import numpy as np
+
+import csv
+import sys
+
+min_rev = 35000
+
+data_reader = csv.reader(open('data.csv'))
+
+data = []
+for row in data_reader:
+ row = row[:-1]
+ if row[0] == 'Revision':
+ data.append(row)
+ continue
+
+ if int(row[0]) < min_rev:
+ continue
+
+ for i, x in enumerate(row):
+ if i <= 1:
+ row[i] = int(row[i])
+ else:
+ row[i] = int(row[i-1]) + int(row[i])
+ data.append(row)
+
+x = [d[0] for d in data[1:]]
+data = [d[1:] for d in data]
+y = list(zip(*data))
+
+l = []
+for i, foo in enumerate(y):
+ ln = plt.plot(x, foo[1:], linewidth=1)
+ l.append(ln)
+
+plt.figlegend(l, data[0], 'lower left')
+plt.fill_between(x, 0, y[0][1:], facecolor=l[0][0].get_color())
+#for i in range(0, len(y)-1):
+# plt.fill_between(x, y[i][1:], y[i+1][1:])
+plt.xlabel('Revision')
+plt.ylabel('Symbol Count')
+plt.show()
+
+png = open('chart2.png', 'wb')
+plt.savefig(png)
diff --git a/tools/dev/wc-ng/populate-pristine.py b/tools/dev/wc-ng/populate-pristine.py
new file mode 100755
index 0000000..4778cfc
--- /dev/null
+++ b/tools/dev/wc-ng/populate-pristine.py
@@ -0,0 +1,108 @@
+#!/usr/bin/env python
+
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements. See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership. The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied. See the License for the
+# specific language governing permissions and limitations
+# under the License.
+
+"""
+A script that takes a .svn/pristine/ hierarchy, with its existing
+.svn/wc.db database, and populates the database's PRISTINE table
+accordingly. (Use 'svn cleanup' to remove unreferenced pristines.)
+
+Usage:
+
+ %s /path/to/wc [...]
+"""
+
+# TODO: resolve the NotImplemented() in __main__
+
+# TODO: increment refcount upon collision
+# TODO: add <given file>, not just argv[1]/.svn/pristine/??/*
+
+import hashlib
+import os
+import re
+import sqlite3
+import sys
+
+# ### This could require any other format that has the same PRISTINE schema
+# ### and semantics.
+FORMAT = 22
+BUFFER_SIZE = 4 * 1024
+
+class UnknownFormat(Exception):
+ def __init__(self, formatno):
+ self.formatno = formatno
+
+def open_db(wc_path):
+ wc_db = os.path.join(wc_path, '.svn', 'wc.db')
+ conn = sqlite3.connect(wc_db)
+ curs = conn.cursor()
+ curs.execute('pragma user_version;')
+ formatno = int(curs.fetchone()[0])
+ if formatno > FORMAT:
+ raise UnknownFormat(formatno)
+ return conn
+
+_sha1_re = re.compile(r'^[0-9a-f]{40}$')
+
+def md5_of(path):
+ fd = os.open(path, os.O_RDONLY)
+ ctx = hashlib.md5()
+ while True:
+ s = os.read(fd, BUFFER_SIZE)
+ if len(s):
+ ctx.update(s)
+ else:
+ os.close(fd)
+ return ctx.hexdigest()
+
+INSERT_QUERY = """
+ INSERT OR REPLACE
+ INTO pristine(checksum,compression,size,refcount,md5_checksum)
+ VALUES (?,?,?,?,?)
+"""
+
+def populate(wc_path):
+ conn = open_db(wc_path)
+ sys.stdout.write("Updating '%s': " % wc_path)
+ for dirname, dirs, files in os.walk(os.path.join(wc_path, '.svn/pristine/')):
+ # skip everything but .svn/pristine/xx/
+ if os.path.basename(os.path.dirname(dirname)) == 'pristine':
+ sys.stdout.write("'%s', " % os.path.basename(dirname))
+ for f in filter(lambda x: _sha1_re.match(x), files):
+ fullpath = os.path.join(dirname, f)
+ conn.execute(INSERT_QUERY,
+ ('$sha1$'+f, None, os.stat(fullpath).st_size, 1,
+ '$md5 $'+md5_of(fullpath)))
+ # periodic transaction commits, for efficiency
+ conn.commit()
+ else:
+ sys.stdout.write(".\n")
+
+if __name__ == '__main__':
+  raise NotImplementedError("""Subversion does not know yet to avoid fetching
+ a file when a file with matching sha1 appears in the PRISTINE table.""")
+
+ paths = sys.argv[1:]
+ if not paths:
+ paths = ['.']
+ for wc_path in paths:
+ try:
+ populate(wc_path)
+ except UnknownFormat as e:
+ sys.stderr.write("Don't know how to handle '%s' (format %d)'\n"
+ % (wc_path, e.formatno))
diff --git a/tools/dev/wc-ng/svn-wc-db-tester.c b/tools/dev/wc-ng/svn-wc-db-tester.c
new file mode 100644
index 0000000..ccdd102
--- /dev/null
+++ b/tools/dev/wc-ng/svn-wc-db-tester.c
@@ -0,0 +1,269 @@
+/* svn-wc-db-tester.c
+ *
+ * This is a crude command line tool that makes it possible to
+ * run the wc-db validation checks directly.
+ *
+ * ====================================================================
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ * ====================================================================
+ */
+
+#include "svn_cmdline.h"
+#include "svn_pools.h"
+#include "svn_wc.h"
+#include "svn_utf.h"
+#include "svn_path.h"
+#include "svn_opt.h"
+#include "svn_version.h"
+
+#include "private/svn_wc_private.h"
+#include "private/svn_cmdline_private.h"
+
+#include "../../../subversion/libsvn_wc/wc.h"
+#include "../../../subversion/libsvn_wc/wc_db.h"
+
+#include "svn_private_config.h"
+
+#define OPT_VERSION SVN_OPT_FIRST_LONGOPT_ID
+
+static svn_error_t *
+version(apr_pool_t *pool)
+{
+ return svn_opt_print_help4(NULL, "svn-wc-db-tester", TRUE, FALSE, FALSE,
+ NULL, NULL, NULL, NULL, NULL, NULL, pool);
+}
+
+static void
+usage(apr_pool_t *pool)
+{
+ svn_error_clear(svn_cmdline_fprintf
+ (stderr, pool,
+ _("Type 'svn-wc-db-tester --help' for usage.\n")));
+}
+
+struct verify_baton
+{
+ svn_boolean_t found_err;
+};
+
+static svn_error_t *
+verify_cb(void *baton,
+ const char *wc_abspath,
+ const char *local_relpath,
+ int op_depth,
+ int id,
+ const char *msg,
+ apr_pool_t *scratch_pool)
+{
+ struct verify_baton *vb = baton;
+
+ if (op_depth >= 0)
+ {
+ SVN_ERR(svn_cmdline_printf(scratch_pool, "%s (depth=%d) DBV%04d: %s\n",
+ local_relpath, op_depth, id, msg));
+ }
+ else
+ {
+ SVN_ERR(svn_cmdline_printf(scratch_pool, "%s DBV%04d: %s\n",
+ local_relpath, id, msg));
+ }
+
+ vb->found_err = TRUE;
+ return SVN_NO_ERROR;
+}
+
+static svn_error_t *
+verify_db(int argc, const char *path, apr_pool_t *pool)
+{
+ const char *local_abspath;
+ svn_wc_context_t *wc_ctx;
+ struct verify_baton vb = { FALSE };
+
+ /* Read the parameters */
+ path = svn_dirent_internal_style(path, pool);
+
+ SVN_ERR(svn_dirent_get_absolute(&local_abspath, path, pool));
+
+ SVN_ERR(svn_wc_context_create(&wc_ctx, NULL, pool, pool));
+
+ SVN_ERR(svn_wc__db_verify_db_full(wc_ctx->db, local_abspath,
+ verify_cb, &vb, pool));
+
+ if (vb.found_err)
+ return svn_error_create(SVN_ERR_WC_PATH_UNEXPECTED_STATUS, NULL,
+ _("Found one or more potential wc.db inconsistencies"));
+
+ return SVN_NO_ERROR;
+}
+
+
+static void
+help(const apr_getopt_option_t *options, apr_pool_t *pool)
+{
+ svn_error_clear
+ (svn_cmdline_fprintf
+ (stdout, pool,
+ _("usage: svn-wc-db-tester [OPTIONS] WC_PATH\n\n"
+ " Run verifications on the working copy\n"
+ "\n"
+ " WC_PATH's parent directory must be a working copy, otherwise a\n"
+ " tree conflict cannot be raised.\n"
+ "\n"
+ "Valid options:\n")));
+ while (options->description)
+ {
+ const char *optstr;
+ svn_opt_format_option(&optstr, options, TRUE, pool);
+ svn_error_clear(svn_cmdline_fprintf(stdout, pool, " %s\n", optstr));
+ ++options;
+ }
+}
+
+
+/* Version compatibility check */
+static svn_error_t *
+check_lib_versions(void)
+{
+ static const svn_version_checklist_t checklist[] =
+ {
+ { "svn_subr", svn_subr_version },
+ { "svn_wc", svn_wc_version },
+ { NULL, NULL }
+ };
+ SVN_VERSION_DEFINE(my_version);
+
+ return svn_ver_check_list2(&my_version, checklist, svn_ver_equal);
+}
+
+/*
+ * On success, leave *EXIT_CODE untouched and return SVN_NO_ERROR. On error,
+ * either return an error to be displayed, or set *EXIT_CODE to non-zero and
+ * return SVN_NO_ERROR.
+ */
+static svn_error_t *
+sub_main(int *exit_code, int argc, const char *argv[], apr_pool_t *pool)
+{
+ apr_getopt_t *os;
+ const apr_getopt_option_t options[] =
+ {
+ {"help", 'h', 0, N_("display this help")},
+ {"version", OPT_VERSION, 0,
+ N_("show program version information")},
+ {0, 0, 0, 0}
+ };
+ apr_array_header_t *remaining_argv;
+
+ /* Check library versions */
+ SVN_ERR(check_lib_versions());
+
+#if defined(WIN32) || defined(__CYGWIN__)
+ /* Set the working copy administrative directory name. */
+ if (getenv("SVN_ASP_DOT_NET_HACK"))
+ {
+ SVN_ERR(svn_wc_set_adm_dir("_svn", pool));
+ }
+#endif
+
+ SVN_ERR(svn_cmdline__getopt_init(&os, argc, argv, pool));
+
+ os->interleave = 1;
+ while (1)
+ {
+ int opt;
+ const char *arg;
+ apr_status_t status = apr_getopt_long(os, options, &opt, &arg);
+ if (APR_STATUS_IS_EOF(status))
+ break;
+ if (status != APR_SUCCESS)
+ {
+ usage(pool);
+ *exit_code = EXIT_FAILURE;
+ return SVN_NO_ERROR;
+ }
+
+ switch (opt)
+ {
+ case 'h':
+ help(options, pool);
+ return SVN_NO_ERROR;
+ case OPT_VERSION:
+ SVN_ERR(version(pool));
+ return SVN_NO_ERROR;
+ default:
+ usage(pool);
+ *exit_code = EXIT_FAILURE;
+ return SVN_NO_ERROR;
+ }
+ }
+
+ /* Convert the remaining arguments to UTF-8. */
+ remaining_argv = apr_array_make(pool, 0, sizeof(const char *));
+ while (os->ind < argc)
+ {
+ const char *s;
+
+ SVN_ERR(svn_utf_cstring_to_utf8(&s, os->argv[os->ind++], pool));
+ APR_ARRAY_PUSH(remaining_argv, const char *) = s;
+ }
+
+ if (remaining_argv->nelts != 1)
+ {
+ usage(pool);
+ *exit_code = EXIT_FAILURE;
+ return SVN_NO_ERROR;
+ }
+
+ /* Do the main task */
+ SVN_ERR(verify_db(remaining_argv->nelts,
+ APR_ARRAY_IDX(remaining_argv, 0, const char *),
+ pool));
+
+ return SVN_NO_ERROR;
+}
+
+int
+main(int argc, const char *argv[])
+{
+ apr_pool_t *pool;
+ int exit_code = EXIT_SUCCESS;
+ svn_error_t *err;
+
+ /* Initialize the app. */
+ if (svn_cmdline_init("svn-wc-db-tester", stderr) != EXIT_SUCCESS)
+ return EXIT_FAILURE;
+
+ /* Create our top-level pool. Use a separate mutexless allocator,
+ * given this application is single threaded.
+ */
+ pool = apr_allocator_owner_get(svn_pool_create_allocator(FALSE));
+
+ err = sub_main(&exit_code, argc, argv, pool);
+
+ /* Flush stdout and report if it fails. It would be flushed on exit anyway
+ but this makes sure that output is not silently lost if it fails. */
+ err = svn_error_compose_create(err, svn_cmdline_fflush(stdout));
+
+ if (err)
+ {
+ exit_code = EXIT_FAILURE;
+ svn_cmdline_handle_exit_error(err, NULL, "svn-wc-db-tester: ");
+ }
+
+ svn_pool_destroy(pool);
+ return exit_code;
+}
diff --git a/tools/dev/which-error.py b/tools/dev/which-error.py
new file mode 100755
index 0000000..6c683cb
--- /dev/null
+++ b/tools/dev/which-error.py
@@ -0,0 +1,142 @@
+#!/usr/bin/env python
+# -*- coding: utf-8 -*-
+#
+# which-error.py: Print semantic Subversion error code names mapped from
+# their numeric error code values
+#
+# ====================================================================
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements. See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership. The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied. See the License for the
+# specific language governing permissions and limitations
+# under the License.
+# ====================================================================
+#
+# $HeadURL: https://svn.apache.org/repos/asf/subversion/branches/1.10.x/tools/dev/which-error.py $
+# $LastChangedDate: 2016-04-30 08:16:53 +0000 (Sat, 30 Apr 2016) $
+# $LastChangedBy: stefan2 $
+# $LastChangedRevision: 1741723 $
+#
+
+import errno
+import sys
+import os.path
+import re
+
+try:
+ from svn import core
+except ImportError as e:
+ sys.stderr.write("ERROR: Unable to import Subversion's Python bindings: '%s'\n" \
+ "Hint: Set your PYTHONPATH environment variable, or adjust your " \
+ "PYTHONSTARTUP\nfile to point to your Subversion install " \
+ "location's svn-python directory.\n" % e)
+ sys.stderr.flush()
+ sys.exit(1)
+
+
+def usage_and_exit():
+ """Write the usage message to stderr and exit with status 1."""
+ progname = os.path.basename(sys.argv[0])
+ sys.stderr.write("""Usage: 1. %s ERRNUM [...]
+ 2. %s parse
+ 3. %s list
+
+Print numeric and semantic error code information for Subversion error
+codes. This can be done in variety of ways:
+
+ 1. For each ERRNUM, list the error code information.
+
+ 2. Parse standard input as if it was error stream from a debug-mode
+ Subversion command-line client, echoing that input to stdout,
+ followed by the error code information for codes found in use in
+ that error stream.
+
+ 3. Simply list the error code information for all known such
+ mappings.
+
+""" % (progname, progname, progname))
+ sys.exit(1)
+
+def get_errors():
+ """Return a dict mapping numeric error codes to symbolic names.
+
+ Three sources are merged, in order (later entries overwrite earlier
+ ones on collision): OS errno values, APR error codes read from the
+ aprerr.txt file that lives next to this script, and the SVN_ERR_*
+ constants exported by the svn.core bindings.
+ """
+ errs = {}
+ ## errno values.
+ errs.update(errno.errorcode)
+ ## APR-defined errors, from apr_errno.h.
+ dirname = os.path.dirname(os.path.realpath(__file__))
+ for line in open(os.path.join(dirname, 'aprerr.txt')):
+ # aprerr.txt parsing duplicated in gen_base.py:write_errno_table()
+ if line.startswith('#'):
+ continue
+ key, _, val = line.split()
+ errs[int(val)] = key
+ ## Subversion errors, from svn_error_codes.h.
+ for key in vars(core):
+ if key.find('SVN_ERR_') == 0:
+ try:
+ val = int(vars(core)[key])
+ errs[val] = key
+ except: # NOTE(review): bare except; silently skips non-numeric attrs
+ pass
+ return errs
+
+def print_error(code):
+ """Print one "CODE NAME" line for a numeric error code.
+
+ Codes missing from the table get a placeholder; 120100-120999 is
+ reported as the libserf error range.
+ """
+ try:
+ print('%08d %s' % (code, __svn_error_codes[code]))
+ except KeyError:
+ if code == -41: # easter egg
+ print("Sit by a lake.")
+ elif code >= 120100 and code < 121000:
+ print('%08d <error code from libserf; see serf.h>' % (code))
+ else:
+ print('%08d *** UNKNOWN ERROR CODE ***' % (code))
+
+if __name__ == "__main__":
+ # NOTE(review): 'global' is a no-op at module scope; harmless here.
+ global __svn_error_codes
+ __svn_error_codes = get_errors()
+ codes = []
+ if len(sys.argv) < 2:
+ usage_and_exit()
+
+ # Get a list of known codes
+ if sys.argv[1] == 'list':
+ if len(sys.argv) > 2:
+ usage_and_exit()
+ codes = sorted(__svn_error_codes.keys())
+
+ # Get a list of code by parsing stdin for apr_err=CODE instances
+ elif sys.argv[1] == 'parse':
+ if len(sys.argv) > 2:
+ usage_and_exit()
+ # Echo stdin through to stdout while harvesting codes.
+ while True:
+ line = sys.stdin.readline()
+ if not line:
+ break
+ sys.stdout.write(line)
+ match = re.match(r'^.*apr_err=([0-9]+)[^0-9].*$', line)
+ if match:
+ codes.append(int(match.group(1)))
+
+ # Get the list of requested codes
+ else:
+ for code in sys.argv[1:]:
+ try:
+ # Accept "E"/"W"-prefixed spellings such as E170001.
+ code = code.lstrip('EW')
+ codes.append(int(code))
+ except ValueError:
+ usage_and_exit()
+
+ # Print the harvest codes
+ for code in codes:
+ print_error(code)
+
+
diff --git a/tools/dev/windows-build/Makefile b/tools/dev/windows-build/Makefile
new file mode 100644
index 0000000..c0e1b15
--- /dev/null
+++ b/tools/dev/windows-build/Makefile
@@ -0,0 +1,155 @@
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements. See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership. The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied. See the License for the
+# specific language governing permissions and limitations
+# under the License.
+
+# ---- Build configuration --------------------------------------------
+CONFIG=debug
+#CONFIG=release
+# will be appended to 'svn --version --quiet' output; set to zero to suppress
+BUILDDESCR=0
+
+MSBUILD=msbuild subversion_vcnet.sln /nologo /v:q /p:Configuration=$(CONFIG)
+CP=cp
+TEEPATH=C:\Path\To\Parent\Of\tee\dot\exe\and\other\utils\as\needed\see\below.80
+
+# ---- Local paths (edit these for your machine) ----------------------
+SOURCESDIR=C:\Path\To\Dependency\Sources
+SVNDIR=C:\Path\To\Subversion\SourceTree
+TARGETDIR=$(SVNDIR)\dist
+INSTALLDIR=E:\svn
+
+# ---- Dependency versions --------------------------------------------
+EXPATVER=2.0.0
+HTTPDVER=2.2.13
+SERFVER=1.1.0
+OPENSSLVER=0.9.8k
+SQLITEVER=3.6.3
+ZLIBVER=1.2.3
+#ENABLE_ML=--enable-ml
+
+PATCHESDIR=$(HOME)\mydiffs\svn
+OPENSSLDIR=$(SOURCESDIR)\openssl-$(OPENSSLVER)
+EXPATDIR=$(SOURCESDIR)\expat-$(EXPATVER)
+HTTPDDIR=$(SOURCESDIR)\httpd-$(HTTPDVER)
+# APR may come standalone or from the httpd tree; the httpd copies are used.
+#APRDIR=$(SOURCESDIR)\apr
+#APRUTILDIR=$(SOURCESDIR)\apr-util
+#APRICONVDIR=$(SOURCESDIR)\apr-iconv
+APRDIR=$(HTTPDDIR)\srclib\apr
+APRUTILDIR=$(HTTPDDIR)\srclib\apr-util
+APRICONVDIR=$(HTTPDDIR)\srclib\apr-iconv
+SQLITEDIR=$(SOURCESDIR)\sqlite-amalgamation
+ZLIBDIR=$(SOURCESDIR)\zlib-$(ZLIBVER)
+SERFDIR=$(SOURCESDIR)\serf-$(SERFVER)
+
+# Default target: just list the available targets.
+all:
+ @echo Available targets: newfiles versionstamp
+ @echo Available targets: config
+ @echo Available targets: progname testname
+ @echo Available targets: all1 all2
+ @echo Available targets: buildlog package
+ @echo Available targets: check checklog
+# Guard targets: fail early if the required variables are empty.
+TARGETDIRset: SVNDIRset
+ if X$(TARGETDIR) == X exit 1
+SVNDIRset:
+ if X$(SVNDIR) == X exit 1
+
+removealllocalmods:
+ svn revert -R .
+ svn status --no-ignore | xargs rm -rf --
+ svn status --no-ignore
+ # last, in case of wc format bump
+ rmdir /s /q dist local Release Debug
+
+newfiles: SVNDIRset
+ xcopy /s /y $(PATCHESDIR)\newfiles $(SVNDIR)
+# Stamp svn_version.h with the working-copy revision, then show the edit.
+versionstamp:
+ perl tools\dev\windows-build\document-version.pl subversion\include\svn_version.h $(TARGETDIR) $(SVNDIR) $(BUILDDESCR)
+ svn diff subversion\include\svn_version.h
+
+# Remove logs and the installed binaries from a previous build.
+# Fixed prerequisite: the guard target is named TARGETDIRset; the original
+# "TARGETDIR" names a variable, not a target, so make would fail with
+# "don't know how to make TARGETDIR" before running this recipe.
+cleanup1: TARGETDIRset
+ del log.all-tests log.gen-make.py log.devenv log.win-tests
+ rmdir /s /q $(TARGETDIR)\bin
+
+clean:
+ @echo "Sorry, '$@' target not yet implemented" >&2
+# TODO also copy sqlite3.dll if it's used
+# Copy a finished build from $(TARGETDIR) into a fresh $(INSTALLDIR).
+install: TARGETDIRset
+ test ! -d $(INSTALLDIR)
+ mkdir $(INSTALLDIR)\bin
+ pushd $(TARGETDIR)\bin &&\
+ $(CP) *.exe $(INSTALLDIR)/bin &&\
+ $(CP) libapr*.dll $(INSTALLDIR)/bin &&\
+ $(CP) libeay32.dll $(INSTALLDIR)/bin &&\
+ $(CP) ssleay32.dll $(INSTALLDIR)/bin &&\
+ $(CP) libsvn*.dll $(INSTALLDIR)/bin &&\
+ $(CP) ..\*.diff $(INSTALLDIR) &&\
+ popd
+
+targetdir: TARGETDIRset
+ test -d $(TARGETDIR)\bin || mkdir $(TARGETDIR)\bin
+
+# TODO: pass --with-apr-* if you don't have httpd; make --with-* args optional
+config: targetdir
+ python gen-make.py --$(CONFIG) --with-httpd=$(HTTPDDIR) --with-serf=$(SERFDIR) --with-openssl=$(OPENSSLDIR) --with-sqlite=$(SQLITEDIR) --with-zlib=$(ZLIBDIR) $(ENABLE_ML) --vsnet-version=2008 -t vcproj 2>&1 | tee log.gen-make
+
+# Visual Studio 2008
+# Per-library, per-program, and per-test build targets; each rebuilds the
+# named MSBuild project and then refreshes the dist tree via 'package'.
+libsvn_auth_gnome_keyring libsvn_auth_kwallet libsvn_client libsvn_delta libsvn_diff libsvn_fs libsvn_fs_base libsvn_fs_fs libsvn_fs_util libsvn_ra libsvn_ra_local libsvn_ra_serf libsvn_ra_svn libsvn_repos libsvn_subr libsvn_wc: targetdir
+ $(MSBUILD) /t:Libraries\$@
+ $(MAKE) package
+svn svnadmin svndumpfilter svnlook svnmucc svnserve svnsync svnversion svnrdump entries-dump: targetdir
+ $(MSBUILD) /t:Programs\$@
+ $(MAKE) package
+auth-test cache-test changes-test checksum-test client-test compat-test config-test db-test diff-diff3-test dir-delta-editor dirent_uri-test error-test fs-base-test fs-pack-test fs-test hashdump-test key-test locks-test mergeinfo-test opt-test path-test ra-local-test random-test repos-test revision-test skel-test stream-test string-test strings-reps-test svn_test_fs svn_test_main svndiff-test target-test time-test translate-test tree-conflict-data-test utf-test vdelta-test window-test: targetdir
+ $(MSBUILD) /t:Tests\$@
+ $(MAKE) package
+
+__ALL__ __ALL_TESTS__: targetdir
+ $(MSBUILD) /t:$@
+ $(MAKE) package
+all1: targetdir
+ $(MSBUILD) /t:__ALL__
+ $(MAKE) package
+ @echo TODO entries-test
+all2: targetdir
+ $(MSBUILD) /t:__ALL_TESTS__
+ $(MAKE) package
+
+# Collect the freshly built binaries and DLLs into $(TARGETDIR)\bin.
+package:
+ test -d $(SVNDIR)\$(CONFIG)\Subversion\tests\cmdline || mkdir $(SVNDIR)\$(CONFIG)\Subversion\tests\cmdline
+ test -d $(TARGETDIR)\bin || mkdir $(TARGETDIR)\bin
+ for %%i in (svn svnadmin svndumpfilter svnlook svnserve svnsync svnversion svnrdump svnmucc) do @$(CP) $(CONFIG)\subversion\%%i\%%i.exe $(TARGETDIR)\bin
+ for %%i in (diff diff3 diff4) do @if exist $(CONFIG)\tools\diff\%%i.exe $(CP) $(CONFIG)\tools\diff\%%i.exe $(TARGETDIR)\bin
+ $(CP) $(APRDIR)\$(CONFIG)/*.dll $(TARGETDIR)\bin
+ $(CP) $(APRUTILDIR)\$(CONFIG)/*.dll $(TARGETDIR)\bin
+ $(CP) $(APRICONVDIR)\$(CONFIG)/*.dll $(TARGETDIR)\bin
+ $(CP) $(OPENSSLDIR)\out32dll/*.dll $(TARGETDIR)\bin
+ for %%i in (client delta diff fs ra repos subr wc) do @$(CP) $(CONFIG)\subversion\libsvn_%%i\*.dll $(TARGETDIR)\bin
+
+# Open the build log in gvim, jumping to the first real error line.
+buildlog:
+ gvim -c "set autoread nowrap" -c "/\(\<0 \)\@<!error" log.devenv
+# 'make check'
+# TODO: also support svncheck, etc
+check:
+ echo %date% %time% :: Starting fsfs file >> log.all-tests
+ python win-tests.py --verbose --cleanup --bin=$(TARGETDIR)\bin --$(CONFIG) -f fsfs 2>&1 | %TEEPATH%\tee log.win-tests
+ echo %date% %time% :: Finished fsfs file >> log.all-tests
+
+
+# check errors
+checklog:
+ gvim -c "set autoread" -p log.win-tests *\*.log "+silent! /X\@<!FAIL\|XPASS"
+
+tags: .
+ REM vim +Ctags +quit
+ ctags -R .
+ $(CP) tags ..\svntags
diff --git a/tools/dev/windows-build/README b/tools/dev/windows-build/README
new file mode 100644
index 0000000..cd05cd8
--- /dev/null
+++ b/tools/dev/windows-build/README
@@ -0,0 +1,22 @@
+Makefiles for automating the Windows build.
+
+Should work with either nmake or GNU make.
+
+Doesn't require Cygwin.
+
+* TODO:
+ - document: how to use
+ - known bugs/shortcomings
+ - separate the configurable parts to a Makefile.local.tmpl file
+ - allow serf,httpd,neon,etc to be optional
+ - auto-generate the list of individual targets from build.conf
+ (that list is not used by the default make targets)
+ - add 'make tools' to the default windows build
+
+See: http://svn.haxx.se/users/archive-2009-07/0764.shtml
+(Message-Id: <alpine.561.2.00.0907241718550.6824@daniel2.local>)
+
+
+'cp' and friends can be obtained from gnuwin32.sf.net, unxutils.sf.net,
+cygwin, etc. Or tweak the makefile to use cp.pl or the built-in 'copy'
+command instead.
diff --git a/tools/dev/windows-build/document-version.pl b/tools/dev/windows-build/document-version.pl
new file mode 100644
index 0000000..398762b
--- /dev/null
+++ b/tools/dev/windows-build/document-version.pl
@@ -0,0 +1,48 @@
+#!/usr/local/bin/perl -w
+
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements. See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership. The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied. See the License for the
+# specific language governing permissions and limitations
+# under the License.
+
+# Stamp svn_version.h with the current working-copy revision and save a
+# diff of all local modifications to $TARGETDIR\$version.diff.
+#
+# Usage: document-version.pl FILENAME TARGETDIR SVNDIR [BUILDDESCR]
+use strict;
+
+use Tie::File;
+#use Cwd 'getcwd';
+
+die "Bad args '@ARGV'" unless (@ARGV >= 3 && @ARGV <= 4);
+
+# BUILDDESCR defaults to "" when only three arguments are given.
+my ($filename, $TARGETDIR, $SVNDIR, $BUILDDESCR) = (@ARGV, "");
+
+# NOTE(review): $lines is declared but never used.
+my (@file, $version, $lines);
+
+# Tie @file to the version header so the substitutions below edit it in place.
+tie (@file, 'Tie::File', $filename)
+ or die $!;
+
+# Working-copy revision; drop any 'M' (locally modified) marker.
+$version = `svnversion -n` or die;
+$version =~ tr/M//d;
+$version .= '-' . $BUILDDESCR if $BUILDDESCR;
+
+# Splice "-r$version" into the SVN_VER_TAG and SVN_VER_NUMTAG defines.
+/^#define SVN_VER_TAG/ and s/(?<=dev build).*(?=\)"$)/-r$version/
+ for @file;
+/^#define SVN_VER_NUMTAG/ and s/(?<=-dev).*(?="$)/-r$version/
+ for @file;
+
+mkdir $TARGETDIR unless -d $TARGETDIR;
+
+# Record the local modifications alongside the stamped build.
+chdir $SVNDIR;
+system "svn diff -x-p > $TARGETDIR\\$version.diff"
+ and die $!;
+
diff --git a/tools/dev/x509-parser.c b/tools/dev/x509-parser.c
new file mode 100644
index 0000000..5ed2ab4
--- /dev/null
+++ b/tools/dev/x509-parser.c
@@ -0,0 +1,179 @@
+/* x509-parser.c -- print human readable info from an X.509 certificate
+ *
+ * ====================================================================
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ * ====================================================================
+ */
+
+#include "svn_pools.h"
+#include "svn_cmdline.h"
+#include "svn_string.h"
+#include "svn_dirent_uri.h"
+#include "svn_io.h"
+#include "svn_base64.h"
+#include "svn_x509.h"
+#include "svn_time.h"
+
+#include "svn_private_config.h"
+
+#define PEM_BEGIN_CERT "-----BEGIN CERTIFICATE-----"
+#define PEM_END_CERT "-----END CERTIFICATE-----"
+
+/* Parse DER_CERT and print its subject, validity dates, issuer,
+ * fingerprint, and (when present) hostnames to stdout.
+ * Allocations are made in SCRATCH_POOL. */
+static svn_error_t *
+show_cert(const svn_string_t *der_cert, apr_pool_t *scratch_pool)
+{
+ svn_x509_certinfo_t *certinfo;
+ const apr_array_header_t *hostnames;
+
+ SVN_ERR(svn_x509_parse_cert(&certinfo, der_cert->data, der_cert->len,
+ scratch_pool, scratch_pool));
+
+ SVN_ERR(svn_cmdline_printf(scratch_pool, _("Subject: %s\n"),
+ svn_x509_certinfo_get_subject(certinfo, scratch_pool)));
+ SVN_ERR(svn_cmdline_printf(scratch_pool, _("Valid from: %s\n"),
+ svn_time_to_human_cstring(
+ svn_x509_certinfo_get_valid_from(certinfo),
+ scratch_pool)));
+ SVN_ERR(svn_cmdline_printf(scratch_pool, _("Valid until: %s\n"),
+ svn_time_to_human_cstring(
+ svn_x509_certinfo_get_valid_to(certinfo),
+ scratch_pool)));
+ SVN_ERR(svn_cmdline_printf(scratch_pool, _("Issuer: %s\n"),
+ svn_x509_certinfo_get_issuer(certinfo, scratch_pool)));
+ SVN_ERR(svn_cmdline_printf(scratch_pool, _("Fingerprint: %s\n"),
+ svn_checksum_to_cstring_display(
+ svn_x509_certinfo_get_digest(certinfo),
+ scratch_pool)));
+
+ /* Hostnames are optional; print them comma-separated on one line. */
+ hostnames = svn_x509_certinfo_get_hostnames(certinfo);
+ if (hostnames && !apr_is_empty_array(hostnames))
+ {
+ int i;
+ svn_stringbuf_t *buf = svn_stringbuf_create_empty(scratch_pool);
+ for (i = 0; i < hostnames->nelts; ++i)
+ {
+ const char *hostname = APR_ARRAY_IDX(hostnames, i, const char*);
+ if (i > 0)
+ svn_stringbuf_appendbytes(buf, ", ", 2);
+ svn_stringbuf_appendbytes(buf, hostname, strlen(hostname));
+ }
+ SVN_ERR(svn_cmdline_printf(scratch_pool, _("Hostnames: %s\n"),
+ buf->data));
+ }
+
+ return SVN_NO_ERROR;
+}
+
+/* Return TRUE iff RAW's first byte looks like the start of a DER
+ * certificate. RAW must be non-empty. */
+static svn_boolean_t
+is_der_cert(const svn_string_t *raw)
+{
+ /* really simplistic fingerprinting of a DER. By definition it must
+ * start with an ASN.1 tag of a constructed (0x20) sequence (0x10).
+ * It's somewhat unfortunate that 0x30 happens to also come out to the
+ * ASCII for '0' which may mean this will create false positives. */
+ return raw->data[0] == 0x30 ? TRUE : FALSE;
+}
+
+/* Read all of IN and set *DER_CERT to a DER-encoded certificate found
+ * there, trying in order: raw DER, headerless base64, and PEM with
+ * BEGIN/END CERTIFICATE markers. Return SVN_ERR_X509_CERT_INVALID_PEM
+ * if none of these match. Results are allocated in POOL. */
+static svn_error_t *
+get_der_cert_from_stream(const svn_string_t **der_cert, svn_stream_t *in,
+ apr_pool_t *pool)
+{
+ svn_string_t *raw;
+ SVN_ERR(svn_string_from_stream2(&raw, in, SVN__STREAM_CHUNK_SIZE,
+ pool));
+
+ *der_cert = NULL;
+
+ /* look for a DER cert */
+ if (is_der_cert(raw))
+ {
+ *der_cert = raw;
+ return SVN_NO_ERROR;
+ }
+ else
+ {
+ const svn_string_t *base64_decoded;
+ const char *start, *end;
+
+ /* Try decoding as base64 without headers */
+ base64_decoded = svn_base64_decode_string(raw, pool);
+ if (base64_decoded && is_der_cert(base64_decoded))
+ {
+ *der_cert = base64_decoded;
+ return SVN_NO_ERROR;
+ }
+
+ /* Try decoding as a PEM with beginning and ending headers. */
+ start = strstr(raw->data, PEM_BEGIN_CERT);
+ end = strstr(raw->data, PEM_END_CERT);
+ if (start && end && end > start)
+ {
+ svn_string_t *encoded;
+
+ /* Skip past the BEGIN marker; back off the trailing newline
+ * before the END marker. */
+ start += sizeof(PEM_BEGIN_CERT) - 1;
+ end -= 1;
+ encoded = svn_string_ncreate(start, end - start, pool);
+ base64_decoded = svn_base64_decode_string(encoded, pool);
+ if (is_der_cert(base64_decoded))
+ {
+ *der_cert = base64_decoded;
+ return SVN_NO_ERROR;
+ }
+ }
+ }
+
+ return svn_error_create(SVN_ERR_X509_CERT_INVALID_PEM, NULL,
+ _("Couldn't find certificate in input data"));
+}
+
+/* Entry point: read a certificate from the file named by argv[1], or
+ * from stdin when no argument is given, and print its details. */
+int main (int argc, const char *argv[])
+{
+ apr_pool_t *pool = NULL;
+ svn_error_t *err;
+ svn_stream_t *in;
+
+ apr_initialize();
+ atexit(apr_terminate);
+
+ pool = svn_pool_create(NULL);
+
+ if (argc == 2)
+ {
+ const char *target = svn_dirent_canonicalize(argv[1], pool);
+ err = svn_stream_open_readonly(&in, target, pool, pool);
+ }
+ else if (argc == 1)
+ {
+ err = svn_stream_for_stdin2(&in, TRUE, pool);
+ }
+ else
+ err = svn_error_create(SVN_ERR_CL_ARG_PARSING_ERROR, NULL, _("Too many arguments"));
+
+ if (!err)
+ {
+ const svn_string_t *der_cert;
+ err = get_der_cert_from_stream(&der_cert, in, pool);
+ if (!err)
+ err = show_cert(der_cert, pool);
+ }
+
+ if (err)
+ return svn_cmdline_handle_exit_error(err, pool, "x509-parser: ");
+
+ return 0;
+}