author     Jelmer Vernooij <jelmer@debian.org>  2015-04-30 23:48:39 +0000
committer  Jelmer Vernooij <jelmer@debian.org>  2015-04-30 23:48:39 +0000
commit     1bcbc351b963aa1445e8759f6a222ffc77810e72 (patch)
tree       96bd29bfd07670d7d631c37a279ca1e06fb863d8
parent     f0b1182be27accc32f5ccbbbb1f09c35c6d0ca97 (diff)
parent     c835b6e5673816d7c346a06c680b2f452421b6a9 (diff)

Merge tag 'upstream/1.3.5'

Upstream version 1.3.5

# gpg: Signature made Thu 30 Apr 2015 23:48:36 UTC using RSA key ID 5E63D2DA
# gpg: Good signature from "Jelmer Vernooij <jelmer@samba.org>"
# gpg:                 aka "Jelmer Vernooij <jelmer@jelmer.uk>"
# gpg:                 aka "Jelmer Vernooij <jelmer@apache.org>"
# gpg:                 aka "Jelmer Vernooij <jelmer@debian.org>"
# gpg:                 aka "Jelmer Vernooij <jelmer@ubuntu.com>"
# gpg:                 aka "Jelmer Vernooij <jelmer@vernstok.nl>"
# gpg:                 aka "Jelmer Vernooij <jelmer@jelmer.co.uk>"
# gpg:                 aka "Jelmer Vernooij <jelmer@openchange.org>"
# gpg:                 aka "Jelmer Vernooij <jrvernooij@tigris.org>"
# gpg: WARNING: This key is not certified with a trusted signature!
# gpg:          There is no indication that the signature belongs to the owner.
# Primary key fingerprint: DC83 7EE1 4A7E 3734 7E87 0617 0080 6F2B D729 A457
#      Subkey fingerprint: 7905 17F2 5A3F 73D3 FEDA 9117 82D1 F6BF 5E63 D2DA
-rw-r--r--  ABI/tdb-1.2.1.sigs  95
-rw-r--r--  ABI/tdb-1.2.10.sigs  66
-rw-r--r--  ABI/tdb-1.2.11.sigs  67
-rw-r--r--  ABI/tdb-1.2.12.sigs  67
-rw-r--r--  ABI/tdb-1.2.13.sigs  67
-rw-r--r--  ABI/tdb-1.2.2.sigs  60
-rw-r--r--  ABI/tdb-1.2.3.sigs  60
-rw-r--r--  ABI/tdb-1.2.4.sigs  60
-rw-r--r--  ABI/tdb-1.2.5.sigs  61
-rw-r--r--  ABI/tdb-1.2.6.sigs  61
-rw-r--r--  ABI/tdb-1.2.7.sigs  61
-rw-r--r--  ABI/tdb-1.2.8.sigs  61
-rw-r--r--  ABI/tdb-1.2.9.sigs  62
-rw-r--r--  ABI/tdb-1.3.0.sigs  68
-rw-r--r--  ABI/tdb-1.3.1.sigs  68
-rw-r--r--  ABI/tdb-1.3.2.sigs  68
-rw-r--r--  ABI/tdb-1.3.3.sigs  68
-rw-r--r--  ABI/tdb-1.3.4.sigs  68
-rw-r--r--  ABI/tdb-1.3.5.sigs  69
-rw-r--r--  Makefile  66
-rw-r--r--  buildtools/README  12
-rwxr-xr-x  buildtools/bin/waf  77
-rwxr-xr-x  buildtools/compare_config_h4.sh  12
-rwxr-xr-x  buildtools/compare_generated.sh  50
-rwxr-xr-x  buildtools/compare_install.sh  8
-rw-r--r--  buildtools/scripts/Makefile.waf  72
-rwxr-xr-x  buildtools/scripts/abi_gen.sh  21
-rwxr-xr-x  buildtools/scripts/autogen-waf.sh  27
-rwxr-xr-x  buildtools/scripts/configure.waf  14
-rwxr-xr-x  buildtools/testwaf.sh  70
-rw-r--r--  buildtools/wafsamba/README  8
-rw-r--r--  buildtools/wafsamba/__init__.py  0
-rw-r--r--  buildtools/wafsamba/configure_file.py  44
-rw-r--r--  buildtools/wafsamba/gccdeps.py  127
-rw-r--r--  buildtools/wafsamba/generic_cc.py  71
-rw-r--r--  buildtools/wafsamba/hpuxcc.py  56
-rw-r--r--  buildtools/wafsamba/irixcc.py  79
-rw-r--r--  buildtools/wafsamba/nothreads.py  220
-rw-r--r--  buildtools/wafsamba/pkgconfig.py  69
-rw-r--r--  buildtools/wafsamba/samba3.py  133
-rw-r--r--  buildtools/wafsamba/samba_abi.py  254
-rw-r--r--  buildtools/wafsamba/samba_autoconf.py  852
-rw-r--r--  buildtools/wafsamba/samba_autoproto.py  23
-rw-r--r--  buildtools/wafsamba/samba_bundled.py  257
-rw-r--r--  buildtools/wafsamba/samba_conftests.py  589
-rw-r--r--  buildtools/wafsamba/samba_cross.py  134
-rw-r--r--  buildtools/wafsamba/samba_deps.py  1188
-rw-r--r--  buildtools/wafsamba/samba_dist.py  251
-rw-r--r--  buildtools/wafsamba/samba_headers.py  179
-rw-r--r--  buildtools/wafsamba/samba_install.py  230
-rw-r--r--  buildtools/wafsamba/samba_optimisation.py  289
-rw-r--r--  buildtools/wafsamba/samba_patterns.py  210
-rw-r--r--  buildtools/wafsamba/samba_perl.py  62
-rw-r--r--  buildtools/wafsamba/samba_pidl.py  145
-rw-r--r--  buildtools/wafsamba/samba_python.py  71
-rw-r--r--  buildtools/wafsamba/samba_third_party.py  35
-rw-r--r--  buildtools/wafsamba/samba_utils.py  668
-rw-r--r--  buildtools/wafsamba/samba_version.py  321
-rw-r--r--  buildtools/wafsamba/samba_wildcard.py  153
-rw-r--r--  buildtools/wafsamba/stale_files.py  111
-rw-r--r--  buildtools/wafsamba/symbols.py  656
-rwxr-xr-x  buildtools/wafsamba/test_duplicate_symbol.sh  12
-rw-r--r--  buildtools/wafsamba/tests/__init__.py  35
-rw-r--r--  buildtools/wafsamba/tests/test_abi.py  120
-rw-r--r--  buildtools/wafsamba/tests/test_bundled.py  27
-rw-r--r--  buildtools/wafsamba/tests/test_utils.py  76
-rw-r--r--  buildtools/wafsamba/tru64cc.py  77
-rw-r--r--  buildtools/wafsamba/wafsamba.py  971
-rwxr-xr-x  buildtools/wafsamba/wscript  540
-rw-r--r--  common/check.c  474
-rw-r--r--  common/dump.c  136
-rw-r--r--  common/error.c  57
-rw-r--r--  common/freelist.c  772
-rw-r--r--  common/freelistcheck.c  108
-rw-r--r--  common/hash.c  345
-rw-r--r--  common/io.c  676
-rw-r--r--  common/lock.c  982
-rw-r--r--  common/mutex.c  1027
-rw-r--r--  common/open.c  941
-rw-r--r--  common/rescue.c  349
-rw-r--r--  common/summary.c  210
-rw-r--r--  common/tdb.c  1141
-rw-r--r--  common/tdb_private.h  327
-rw-r--r--  common/transaction.c  1312
-rw-r--r--  common/traverse.c  366
-rwxr-xr-x  configure  21
-rw-r--r--  docs/README  273
-rw-r--r--  docs/mainpage.dox  61
-rw-r--r--  docs/mutex.txt  136
-rw-r--r--  docs/tdb.magic  10
-rw-r--r--  docs/tracing.txt  46
-rw-r--r--  doxy.config  1697
-rw-r--r--  include/tdb.h  911
-rw-r--r--  lib/replace/.checker_innocent  4
-rw-r--r--  lib/replace/Makefile  63
-rw-r--r--  lib/replace/README  127
-rwxr-xr-x  lib/replace/configure  21
-rw-r--r--  lib/replace/crypt.c  770
-rw-r--r--  lib/replace/cwrap.c  46
-rw-r--r--  lib/replace/dlfcn.c  76
-rw-r--r--  lib/replace/getaddrinfo.c  493
-rw-r--r--  lib/replace/getaddrinfo.h  91
-rw-r--r--  lib/replace/getifaddrs.c  383
-rw-r--r--  lib/replace/hdr_replace.h  2
-rw-r--r--  lib/replace/inet_aton.c  33
-rw-r--r--  lib/replace/inet_ntoa.c  39
-rw-r--r--  lib/replace/inet_ntop.c  191
-rw-r--r--  lib/replace/inet_pton.c  213
-rw-r--r--  lib/replace/poll.c  139
-rw-r--r--  lib/replace/replace-test.h  9
-rw-r--r--  lib/replace/replace-testsuite.h  10
-rw-r--r--  lib/replace/replace.c  910
-rw-r--r--  lib/replace/replace.h  916
-rw-r--r--  lib/replace/snprintf.c  1534
-rw-r--r--  lib/replace/socket.c  39
-rw-r--r--  lib/replace/socketpair.c  46
-rw-r--r--  lib/replace/strptime.c  993
-rw-r--r--  lib/replace/system/README  4
-rw-r--r--  lib/replace/system/aio.h  32
-rw-r--r--  lib/replace/system/capability.h  55
-rw-r--r--  lib/replace/system/dir.h  67
-rw-r--r--  lib/replace/system/filesys.h  269
-rw-r--r--  lib/replace/system/glob.h  37
-rw-r--r--  lib/replace/system/gssapi.h  53
-rw-r--r--  lib/replace/system/iconv.h  57
-rw-r--r--  lib/replace/system/kerberos.h  41
-rw-r--r--  lib/replace/system/locale.h  42
-rw-r--r--  lib/replace/system/network.h  368
-rw-r--r--  lib/replace/system/passwd.h  92
-rw-r--r--  lib/replace/system/readline.h  58
-rw-r--r--  lib/replace/system/select.h  81
-rw-r--r--  lib/replace/system/shmem.h  59
-rw-r--r--  lib/replace/system/syslog.h  70
-rw-r--r--  lib/replace/system/terminal.h  46
-rw-r--r--  lib/replace/system/threads.h  48
-rw-r--r--  lib/replace/system/time.h  91
-rw-r--r--  lib/replace/system/wait.h  55
-rw-r--r--  lib/replace/system/wscript_configure  26
-rw-r--r--  lib/replace/test/getifaddrs.c  105
-rw-r--r--  lib/replace/test/incoherent_mmap.c  83
-rw-r--r--  lib/replace/test/main.c  35
-rw-r--r--  lib/replace/test/os2_delete.c  134
-rw-r--r--  lib/replace/test/shared_mmap.c  68
-rw-r--r--  lib/replace/test/shared_mremap.c  48
-rw-r--r--  lib/replace/test/snprintf.c  29
-rw-r--r--  lib/replace/test/strptime.c  173
-rw-r--r--  lib/replace/test/testsuite.c  1114
-rw-r--r--  lib/replace/timegm.c  78
-rw-r--r--  lib/replace/win32_replace.h  159
-rw-r--r--  lib/replace/wscript  730
-rw-r--r--  lib/replace/xattr.c  734
-rw-r--r--  man/tdbbackup.8.xml  149
-rw-r--r--  man/tdbdump.8.xml  93
-rw-r--r--  man/tdbrestore.8.xml  67
-rw-r--r--  man/tdbtool.8.xml  265
-rw-r--r--  pytdb.c  707
-rw-r--r--  python/tdbdump.py  12
-rw-r--r--  python/tests/simple.py  183
-rw-r--r--  tdb.pc.in  11
-rw-r--r--  test/external-agent.c  223
-rw-r--r--  test/external-agent.h  44
-rw-r--r--  test/jenkins-be-hash.tdb  bin 0 -> 696 bytes
-rw-r--r--  test/jenkins-le-hash.tdb  bin 0 -> 696 bytes
-rw-r--r--  test/lock-tracking.c  157
-rw-r--r--  test/lock-tracking.h  25
-rw-r--r--  test/logging.c  33
-rw-r--r--  test/logging.h  11
-rw-r--r--  test/old-nohash-be.tdb  bin 0 -> 696 bytes
-rw-r--r--  test/old-nohash-le.tdb  bin 0 -> 696 bytes
-rw-r--r--  test/run-3G-file.c  145
-rw-r--r--  test/run-bad-tdb-header.c  59
-rw-r--r--  test/run-check.c  65
-rw-r--r--  test/run-corrupt.c  132
-rw-r--r--  test/run-die-during-transaction.c  232
-rw-r--r--  test/run-endian.c  64
-rw-r--r--  test/run-incompatible.c  188
-rw-r--r--  test/run-marklock-deadlock.c  278
-rw-r--r--  test/run-mutex-allrecord-bench.c  82
-rw-r--r--  test/run-mutex-allrecord-block.c  120
-rw-r--r--  test/run-mutex-allrecord-trylock.c  113
-rw-r--r--  test/run-mutex-die.c  269
-rw-r--r--  test/run-mutex-openflags2.c  153
-rw-r--r--  test/run-mutex-transaction1.c  236
-rw-r--r--  test/run-mutex-trylock.c  122
-rw-r--r--  test/run-mutex1.c  138
-rw-r--r--  test/run-nested-transactions.c  79
-rw-r--r--  test/run-nested-traverse.c  88
-rw-r--r--  test/run-no-lock-during-traverse.c  114
-rw-r--r--  test/run-oldhash.c  50
-rw-r--r--  test/run-open-during-transaction.c  183
-rw-r--r--  test/run-readonly-check.c  53
-rw-r--r--  test/run-rescue-find_entry.c  51
-rw-r--r--  test/run-rescue.c  127
-rw-r--r--  test/run-rwlock-check.c  46
-rw-r--r--  test/run-summary.c  65
-rw-r--r--  test/run-transaction-expand.c  125
-rw-r--r--  test/run-traverse-in-transaction.c  87
-rw-r--r--  test/run-wronghash-fail.c  121
-rw-r--r--  test/run-zero-append.c  41
-rw-r--r--  test/run.c  50
-rw-r--r--  test/rwlock-be.tdb  bin 0 -> 696 bytes
-rw-r--r--  test/rwlock-le.tdb  bin 0 -> 696 bytes
-rw-r--r--  test/tap-interface.h  58
-rw-r--r--  test/tap-to-subunit.h  155
-rw-r--r--  test/tdb.corrupt  bin 0 -> 192512 bytes
-rw-r--r--  third_party/waf/wafadmin/3rdparty/ParallelDebug.py  297
-rw-r--r--  third_party/waf/wafadmin/3rdparty/batched_cc.py  182
-rw-r--r--  third_party/waf/wafadmin/3rdparty/boost.py  342
-rw-r--r--  third_party/waf/wafadmin/3rdparty/build_file_tracker.py  53
-rw-r--r--  third_party/waf/wafadmin/3rdparty/fluid.py  26
-rw-r--r--  third_party/waf/wafadmin/3rdparty/gccdeps.py  127
-rw-r--r--  third_party/waf/wafadmin/3rdparty/go.py  110
-rw-r--r--  third_party/waf/wafadmin/3rdparty/lru_cache.py  96
-rw-r--r--  third_party/waf/wafadmin/3rdparty/paranoid.py  34
-rwxr-xr-x  third_party/waf/wafadmin/3rdparty/prefork.py  275
-rw-r--r--  third_party/waf/wafadmin/3rdparty/swig.py  189
-rw-r--r--  third_party/waf/wafadmin/3rdparty/valadoc.py  112
-rw-r--r--  third_party/waf/wafadmin/Build.py  1032
-rw-r--r--  third_party/waf/wafadmin/Configure.py  442
-rw-r--r--  third_party/waf/wafadmin/Constants.py  75
-rw-r--r--  third_party/waf/wafadmin/Environment.py  209
-rw-r--r--  third_party/waf/wafadmin/Logs.py  133
-rw-r--r--  third_party/waf/wafadmin/Node.py  694
-rw-r--r--  third_party/waf/wafadmin/Options.py  287
-rw-r--r--  third_party/waf/wafadmin/Runner.py  235
-rw-r--r--  third_party/waf/wafadmin/Scripting.py  585
-rw-r--r--  third_party/waf/wafadmin/Task.py  1199
-rw-r--r--  third_party/waf/wafadmin/TaskGen.py  611
-rw-r--r--  third_party/waf/wafadmin/Tools/__init__.py  3
-rw-r--r--  third_party/waf/wafadmin/Tools/ar.py  34
-rw-r--r--  third_party/waf/wafadmin/Tools/bison.py  37
-rw-r--r--  third_party/waf/wafadmin/Tools/cc.py  99
-rw-r--r--  third_party/waf/wafadmin/Tools/ccroot.py  639
-rw-r--r--  third_party/waf/wafadmin/Tools/compiler_cc.py  66
-rw-r--r--  third_party/waf/wafadmin/Tools/compiler_cxx.py  61
-rw-r--r--  third_party/waf/wafadmin/Tools/compiler_d.py  32
-rw-r--r--  third_party/waf/wafadmin/Tools/config_c.py  748
-rw-r--r--  third_party/waf/wafadmin/Tools/cs.py  67
-rw-r--r--  third_party/waf/wafadmin/Tools/cxx.py  103
-rw-r--r--  third_party/waf/wafadmin/Tools/d.py  534
-rw-r--r--  third_party/waf/wafadmin/Tools/dbus.py  33
-rw-r--r--  third_party/waf/wafadmin/Tools/dmd.py  63
-rw-r--r--  third_party/waf/wafadmin/Tools/flex.py  24
-rw-r--r--  third_party/waf/wafadmin/Tools/gas.py  37
-rw-r--r--  third_party/waf/wafadmin/Tools/gcc.py  138
-rw-r--r--  third_party/waf/wafadmin/Tools/gdc.py  51
-rw-r--r--  third_party/waf/wafadmin/Tools/glib2.py  163
-rw-r--r--  third_party/waf/wafadmin/Tools/gnome.py  222
-rw-r--r--  third_party/waf/wafadmin/Tools/gnu_dirs.py  110
-rw-r--r--  third_party/waf/wafadmin/Tools/gob2.py  17
-rw-r--r--  third_party/waf/wafadmin/Tools/gxx.py  136
-rw-r--r--  third_party/waf/wafadmin/Tools/icc.py  37
-rw-r--r--  third_party/waf/wafadmin/Tools/icpc.py  35
-rw-r--r--  third_party/waf/wafadmin/Tools/intltool.py  138
-rw-r--r--  third_party/waf/wafadmin/Tools/javaw.py  254
-rw-r--r--  third_party/waf/wafadmin/Tools/kde4.py  73
-rw-r--r--  third_party/waf/wafadmin/Tools/libtool.py  329
-rw-r--r--  third_party/waf/wafadmin/Tools/lua.py  24
-rw-r--r--  third_party/waf/wafadmin/Tools/misc.py  429
-rw-r--r--  third_party/waf/wafadmin/Tools/msvc.py  796
-rw-r--r--  third_party/waf/wafadmin/Tools/nasm.py  48
-rw-r--r--  third_party/waf/wafadmin/Tools/ocaml.py  297
-rw-r--r--  third_party/waf/wafadmin/Tools/osx.py  187
-rw-r--r--  third_party/waf/wafadmin/Tools/perl.py  108
-rw-r--r--  third_party/waf/wafadmin/Tools/preproc.py  837
-rw-r--r--  third_party/waf/wafadmin/Tools/python.py  432
-rw-r--r--  third_party/waf/wafadmin/Tools/qt4.py  504
-rw-r--r--  third_party/waf/wafadmin/Tools/ruby.py  119
-rw-r--r--  third_party/waf/wafadmin/Tools/suncc.py  76
-rw-r--r--  third_party/waf/wafadmin/Tools/suncxx.py  75
-rw-r--r--  third_party/waf/wafadmin/Tools/tex.py  250
-rw-r--r--  third_party/waf/wafadmin/Tools/unittestw.py  308
-rw-r--r--  third_party/waf/wafadmin/Tools/vala.py  307
-rw-r--r--  third_party/waf/wafadmin/Tools/winres.py  44
-rw-r--r--  third_party/waf/wafadmin/Tools/xlc.py  78
-rw-r--r--  third_party/waf/wafadmin/Tools/xlcxx.py  78
-rw-r--r--  third_party/waf/wafadmin/Utils.py  725
-rw-r--r--  third_party/waf/wafadmin/__init__.py  3
-rw-r--r--  third_party/waf/wafadmin/ansiterm.py  235
-rw-r--r--  third_party/waf/wafadmin/pproc.py  619
-rw-r--r--  third_party/waf/wafadmin/py3kfixes.py  129
-rw-r--r--  tools/tdbbackup.c  349
-rw-r--r--  tools/tdbdump.c  176
-rw-r--r--  tools/tdbrestore.c  222
-rw-r--r--  tools/tdbtest.c  290
-rw-r--r--  tools/tdbtool.c  858
-rw-r--r--  tools/tdbtorture.c  482
-rw-r--r--  web/index.html  48
-rw-r--r--  wscript  243
289 files changed, 62569 insertions, 0 deletions
diff --git a/ABI/tdb-1.2.1.sigs b/ABI/tdb-1.2.1.sigs
new file mode 100644
index 0000000..84f2007
--- /dev/null
+++ b/ABI/tdb-1.2.1.sigs
@@ -0,0 +1,95 @@
+tdb_add_flags: void (struct tdb_context *, unsigned int)
+tdb_alloc_read: unsigned char *(struct tdb_context *, tdb_off_t, tdb_len_t)
+tdb_allocate: tdb_off_t (struct tdb_context *, tdb_len_t, struct tdb_record *)
+tdb_allrecord_lock: int (struct tdb_context *, int, enum tdb_lock_flags, bool)
+tdb_allrecord_unlock: int (struct tdb_context *, int, bool)
+tdb_allrecord_upgrade: int (struct tdb_context *)
+tdb_append: int (struct tdb_context *, TDB_DATA, TDB_DATA)
+tdb_brlock: int (struct tdb_context *, int, tdb_off_t, size_t, enum tdb_lock_flags)
+tdb_brunlock: int (struct tdb_context *, int, tdb_off_t, size_t)
+tdb_chainlock: int (struct tdb_context *, TDB_DATA)
+tdb_chainlock_mark: int (struct tdb_context *, TDB_DATA)
+tdb_chainlock_nonblock: int (struct tdb_context *, TDB_DATA)
+tdb_chainlock_read: int (struct tdb_context *, TDB_DATA)
+tdb_chainlock_unmark: int (struct tdb_context *, TDB_DATA)
+tdb_chainunlock: int (struct tdb_context *, TDB_DATA)
+tdb_chainunlock_read: int (struct tdb_context *, TDB_DATA)
+tdb_check: int (struct tdb_context *, int (*)(TDB_DATA, TDB_DATA, void *), void *)
+tdb_close: int (struct tdb_context *)
+tdb_convert: void *(void *, uint32_t)
+tdb_delete: int (struct tdb_context *, TDB_DATA)
+tdb_do_delete: int (struct tdb_context *, tdb_off_t, struct tdb_record *)
+tdb_dump_all: void (struct tdb_context *)
+tdb_enable_seqnum: void (struct tdb_context *)
+tdb_error: enum TDB_ERROR (struct tdb_context *)
+tdb_errorstr: const char *(struct tdb_context *)
+tdb_exists: int (struct tdb_context *, TDB_DATA)
+tdb_expand: int (struct tdb_context *, tdb_off_t)
+tdb_fd: int (struct tdb_context *)
+tdb_fetch: TDB_DATA (struct tdb_context *, TDB_DATA)
+tdb_find_lock_hash: tdb_off_t (struct tdb_context *, TDB_DATA, uint32_t, int, struct tdb_record *)
+tdb_firstkey: TDB_DATA (struct tdb_context *)
+tdb_free: int (struct tdb_context *, tdb_off_t, struct tdb_record *)
+tdb_freelist_size: int (struct tdb_context *)
+tdb_get_flags: int (struct tdb_context *)
+tdb_get_logging_private: void *(struct tdb_context *)
+tdb_get_seqnum: int (struct tdb_context *)
+tdb_hash_size: int (struct tdb_context *)
+tdb_have_extra_locks: bool (struct tdb_context *)
+tdb_increment_seqnum_nonblock: void (struct tdb_context *)
+tdb_io_init: void (struct tdb_context *)
+tdb_lock: int (struct tdb_context *, int, int)
+tdb_lock_nonblock: int (struct tdb_context *, int, int)
+tdb_lock_record: int (struct tdb_context *, tdb_off_t)
+tdb_lockall: int (struct tdb_context *)
+tdb_lockall_mark: int (struct tdb_context *)
+tdb_lockall_nonblock: int (struct tdb_context *)
+tdb_lockall_read: int (struct tdb_context *)
+tdb_lockall_read_nonblock: int (struct tdb_context *)
+tdb_lockall_unmark: int (struct tdb_context *)
+tdb_log_fn: tdb_log_func (struct tdb_context *)
+tdb_map_size: size_t (struct tdb_context *)
+tdb_mmap: void (struct tdb_context *)
+tdb_munmap: int (struct tdb_context *)
+tdb_name: const char *(struct tdb_context *)
+tdb_needs_recovery: bool (struct tdb_context *)
+tdb_nest_lock: int (struct tdb_context *, uint32_t, int, enum tdb_lock_flags)
+tdb_nest_unlock: int (struct tdb_context *, uint32_t, int, bool)
+tdb_nextkey: TDB_DATA (struct tdb_context *, TDB_DATA)
+tdb_ofs_read: int (struct tdb_context *, tdb_off_t, tdb_off_t *)
+tdb_ofs_write: int (struct tdb_context *, tdb_off_t, tdb_off_t *)
+tdb_open: struct tdb_context *(const char *, int, int, int, mode_t)
+tdb_open_ex: struct tdb_context *(const char *, int, int, int, mode_t, const struct tdb_logging_context *, tdb_hash_func)
+tdb_parse_data: int (struct tdb_context *, TDB_DATA, tdb_off_t, tdb_len_t, int (*)(TDB_DATA, TDB_DATA, void *), void *)
+tdb_parse_record: int (struct tdb_context *, TDB_DATA, int (*)(TDB_DATA, TDB_DATA, void *), void *)
+tdb_printfreelist: int (struct tdb_context *)
+tdb_rec_free_read: int (struct tdb_context *, tdb_off_t, struct tdb_record *)
+tdb_rec_read: int (struct tdb_context *, tdb_off_t, struct tdb_record *)
+tdb_rec_write: int (struct tdb_context *, tdb_off_t, struct tdb_record *)
+tdb_release_transaction_locks: void (struct tdb_context *)
+tdb_remove_flags: void (struct tdb_context *, unsigned int)
+tdb_reopen: int (struct tdb_context *)
+tdb_reopen_all: int (int)
+tdb_repack: int (struct tdb_context *)
+tdb_set_logging_function: void (struct tdb_context *, const struct tdb_logging_context *)
+tdb_set_max_dead: void (struct tdb_context *, int)
+tdb_setalarm_sigptr: void (struct tdb_context *, volatile sig_atomic_t *)
+tdb_store: int (struct tdb_context *, TDB_DATA, TDB_DATA, int)
+tdb_transaction_cancel: int (struct tdb_context *)
+tdb_transaction_commit: int (struct tdb_context *)
+tdb_transaction_lock: int (struct tdb_context *, int, enum tdb_lock_flags)
+tdb_transaction_prepare_commit: int (struct tdb_context *)
+tdb_transaction_recover: int (struct tdb_context *)
+tdb_transaction_start: int (struct tdb_context *)
+tdb_transaction_start_nonblock: int (struct tdb_context *)
+tdb_transaction_unlock: int (struct tdb_context *, int)
+tdb_traverse: int (struct tdb_context *, tdb_traverse_func, void *)
+tdb_traverse_read: int (struct tdb_context *, tdb_traverse_func, void *)
+tdb_unlock: int (struct tdb_context *, int, int)
+tdb_unlock_record: int (struct tdb_context *, tdb_off_t)
+tdb_unlockall: int (struct tdb_context *)
+tdb_unlockall_read: int (struct tdb_context *)
+tdb_validate_freelist: int (struct tdb_context *, int *)
+tdb_wipe_all: int (struct tdb_context *)
+tdb_write_lock_record: int (struct tdb_context *, tdb_off_t)
+tdb_write_unlock_record: int (struct tdb_context *, tdb_off_t)
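
(For orientation: each .sigs file above records one C declaration per exported symbol, apparently generated by buildtools/scripts/abi_gen.sh and checked by buildtools/wafsamba/samba_abi.py, both added in this diff. A minimal usage sketch of the core calls listed in ABI/tdb-1.2.1.sigs follows; the database name, flags, and error handling are illustrative assumptions, not part of this diff.)

    /* Minimal sketch of the API recorded in ABI/tdb-1.2.1.sigs.
     * File name, flags and error handling here are illustrative. */
    #include <stdio.h>
    #include <stdlib.h>
    #include <fcntl.h>
    #include <tdb.h>

    int main(void)
    {
        /* tdb_open: struct tdb_context *(const char *, int, int, int, mode_t);
         * a hash size of 0 selects the library default. */
        struct tdb_context *tdb = tdb_open("example.tdb", 0,
                                           TDB_DEFAULT, O_RDWR | O_CREAT, 0600);
        if (tdb == NULL)
            return 1;

        TDB_DATA key = { .dptr = (unsigned char *)"hello", .dsize = 5 };
        TDB_DATA val = { .dptr = (unsigned char *)"world", .dsize = 5 };

        /* tdb_store: int (struct tdb_context *, TDB_DATA, TDB_DATA, int) */
        if (tdb_store(tdb, key, val, TDB_REPLACE) != 0) {
            fprintf(stderr, "store failed: %s\n", tdb_errorstr(tdb));
            tdb_close(tdb);
            return 1;
        }

        /* tdb_fetch returns a malloc'ed copy; the caller frees dptr. */
        TDB_DATA out = tdb_fetch(tdb, key);
        if (out.dptr != NULL) {
            printf("%.*s\n", (int)out.dsize, (const char *)out.dptr);
            free(out.dptr);
        }

        return tdb_close(tdb);   /* tdb_close: int (struct tdb_context *) */
    }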
diff --git a/ABI/tdb-1.2.10.sigs b/ABI/tdb-1.2.10.sigs
new file mode 100644
index 0000000..61f6c19
--- /dev/null
+++ b/ABI/tdb-1.2.10.sigs
@@ -0,0 +1,66 @@
+tdb_add_flags: void (struct tdb_context *, unsigned int)
+tdb_append: int (struct tdb_context *, TDB_DATA, TDB_DATA)
+tdb_chainlock: int (struct tdb_context *, TDB_DATA)
+tdb_chainlock_mark: int (struct tdb_context *, TDB_DATA)
+tdb_chainlock_nonblock: int (struct tdb_context *, TDB_DATA)
+tdb_chainlock_read: int (struct tdb_context *, TDB_DATA)
+tdb_chainlock_unmark: int (struct tdb_context *, TDB_DATA)
+tdb_chainunlock: int (struct tdb_context *, TDB_DATA)
+tdb_chainunlock_read: int (struct tdb_context *, TDB_DATA)
+tdb_check: int (struct tdb_context *, int (*)(TDB_DATA, TDB_DATA, void *), void *)
+tdb_close: int (struct tdb_context *)
+tdb_delete: int (struct tdb_context *, TDB_DATA)
+tdb_dump_all: void (struct tdb_context *)
+tdb_enable_seqnum: void (struct tdb_context *)
+tdb_error: enum TDB_ERROR (struct tdb_context *)
+tdb_errorstr: const char *(struct tdb_context *)
+tdb_exists: int (struct tdb_context *, TDB_DATA)
+tdb_fd: int (struct tdb_context *)
+tdb_fetch: TDB_DATA (struct tdb_context *, TDB_DATA)
+tdb_firstkey: TDB_DATA (struct tdb_context *)
+tdb_freelist_size: int (struct tdb_context *)
+tdb_get_flags: int (struct tdb_context *)
+tdb_get_logging_private: void *(struct tdb_context *)
+tdb_get_seqnum: int (struct tdb_context *)
+tdb_hash_size: int (struct tdb_context *)
+tdb_increment_seqnum_nonblock: void (struct tdb_context *)
+tdb_jenkins_hash: unsigned int (TDB_DATA *)
+tdb_lock_nonblock: int (struct tdb_context *, int, int)
+tdb_lockall: int (struct tdb_context *)
+tdb_lockall_mark: int (struct tdb_context *)
+tdb_lockall_nonblock: int (struct tdb_context *)
+tdb_lockall_read: int (struct tdb_context *)
+tdb_lockall_read_nonblock: int (struct tdb_context *)
+tdb_lockall_unmark: int (struct tdb_context *)
+tdb_log_fn: tdb_log_func (struct tdb_context *)
+tdb_map_size: size_t (struct tdb_context *)
+tdb_name: const char *(struct tdb_context *)
+tdb_nextkey: TDB_DATA (struct tdb_context *, TDB_DATA)
+tdb_null: dptr = 0xXXXX, dsize = 0
+tdb_open: struct tdb_context *(const char *, int, int, int, mode_t)
+tdb_open_ex: struct tdb_context *(const char *, int, int, int, mode_t, const struct tdb_logging_context *, tdb_hash_func)
+tdb_parse_record: int (struct tdb_context *, TDB_DATA, int (*)(TDB_DATA, TDB_DATA, void *), void *)
+tdb_printfreelist: int (struct tdb_context *)
+tdb_remove_flags: void (struct tdb_context *, unsigned int)
+tdb_reopen: int (struct tdb_context *)
+tdb_reopen_all: int (int)
+tdb_repack: int (struct tdb_context *)
+tdb_set_logging_function: void (struct tdb_context *, const struct tdb_logging_context *)
+tdb_set_max_dead: void (struct tdb_context *, int)
+tdb_setalarm_sigptr: void (struct tdb_context *, volatile sig_atomic_t *)
+tdb_store: int (struct tdb_context *, TDB_DATA, TDB_DATA, int)
+tdb_summary: char *(struct tdb_context *)
+tdb_transaction_cancel: int (struct tdb_context *)
+tdb_transaction_commit: int (struct tdb_context *)
+tdb_transaction_prepare_commit: int (struct tdb_context *)
+tdb_transaction_start: int (struct tdb_context *)
+tdb_transaction_start_nonblock: int (struct tdb_context *)
+tdb_transaction_write_lock_mark: int (struct tdb_context *)
+tdb_transaction_write_lock_unmark: int (struct tdb_context *)
+tdb_traverse: int (struct tdb_context *, tdb_traverse_func, void *)
+tdb_traverse_read: int (struct tdb_context *, tdb_traverse_func, void *)
+tdb_unlock: int (struct tdb_context *, int, int)
+tdb_unlockall: int (struct tdb_context *)
+tdb_unlockall_read: int (struct tdb_context *)
+tdb_validate_freelist: int (struct tdb_context *, int *)
+tdb_wipe_all: int (struct tdb_context *)
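
(The 1.2.10 list above also carries the transaction API. A hedged sketch of the usual start/store/commit pattern, assuming an already-open struct tdb_context; the helper name and error handling are invented for illustration.)

    /* Transaction pattern implied by the signatures above; the function
     * name is hypothetical and error handling is illustrative. */
    static int update_under_transaction(struct tdb_context *tdb,
                                        TDB_DATA key, TDB_DATA val)
    {
        if (tdb_transaction_start(tdb) != 0)   /* int (struct tdb_context *) */
            return -1;

        if (tdb_store(tdb, key, val, TDB_REPLACE) != 0) {
            tdb_transaction_cancel(tdb);       /* roll the transaction back */
            return -1;
        }

        /* tdb_transaction_prepare_commit() is listed for two-phase use;
         * for the simple case a direct commit is enough. */
        return tdb_transaction_commit(tdb);
    }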
diff --git a/ABI/tdb-1.2.11.sigs b/ABI/tdb-1.2.11.sigs
new file mode 100644
index 0000000..d727f21
--- /dev/null
+++ b/ABI/tdb-1.2.11.sigs
@@ -0,0 +1,67 @@
+tdb_add_flags: void (struct tdb_context *, unsigned int)
+tdb_append: int (struct tdb_context *, TDB_DATA, TDB_DATA)
+tdb_chainlock: int (struct tdb_context *, TDB_DATA)
+tdb_chainlock_mark: int (struct tdb_context *, TDB_DATA)
+tdb_chainlock_nonblock: int (struct tdb_context *, TDB_DATA)
+tdb_chainlock_read: int (struct tdb_context *, TDB_DATA)
+tdb_chainlock_unmark: int (struct tdb_context *, TDB_DATA)
+tdb_chainunlock: int (struct tdb_context *, TDB_DATA)
+tdb_chainunlock_read: int (struct tdb_context *, TDB_DATA)
+tdb_check: int (struct tdb_context *, int (*)(TDB_DATA, TDB_DATA, void *), void *)
+tdb_close: int (struct tdb_context *)
+tdb_delete: int (struct tdb_context *, TDB_DATA)
+tdb_dump_all: void (struct tdb_context *)
+tdb_enable_seqnum: void (struct tdb_context *)
+tdb_error: enum TDB_ERROR (struct tdb_context *)
+tdb_errorstr: const char *(struct tdb_context *)
+tdb_exists: int (struct tdb_context *, TDB_DATA)
+tdb_fd: int (struct tdb_context *)
+tdb_fetch: TDB_DATA (struct tdb_context *, TDB_DATA)
+tdb_firstkey: TDB_DATA (struct tdb_context *)
+tdb_freelist_size: int (struct tdb_context *)
+tdb_get_flags: int (struct tdb_context *)
+tdb_get_logging_private: void *(struct tdb_context *)
+tdb_get_seqnum: int (struct tdb_context *)
+tdb_hash_size: int (struct tdb_context *)
+tdb_increment_seqnum_nonblock: void (struct tdb_context *)
+tdb_jenkins_hash: unsigned int (TDB_DATA *)
+tdb_lock_nonblock: int (struct tdb_context *, int, int)
+tdb_lockall: int (struct tdb_context *)
+tdb_lockall_mark: int (struct tdb_context *)
+tdb_lockall_nonblock: int (struct tdb_context *)
+tdb_lockall_read: int (struct tdb_context *)
+tdb_lockall_read_nonblock: int (struct tdb_context *)
+tdb_lockall_unmark: int (struct tdb_context *)
+tdb_log_fn: tdb_log_func (struct tdb_context *)
+tdb_map_size: size_t (struct tdb_context *)
+tdb_name: const char *(struct tdb_context *)
+tdb_nextkey: TDB_DATA (struct tdb_context *, TDB_DATA)
+tdb_null: dptr = 0xXXXX, dsize = 0
+tdb_open: struct tdb_context *(const char *, int, int, int, mode_t)
+tdb_open_ex: struct tdb_context *(const char *, int, int, int, mode_t, const struct tdb_logging_context *, tdb_hash_func)
+tdb_parse_record: int (struct tdb_context *, TDB_DATA, int (*)(TDB_DATA, TDB_DATA, void *), void *)
+tdb_printfreelist: int (struct tdb_context *)
+tdb_remove_flags: void (struct tdb_context *, unsigned int)
+tdb_reopen: int (struct tdb_context *)
+tdb_reopen_all: int (int)
+tdb_repack: int (struct tdb_context *)
+tdb_rescue: int (struct tdb_context *, void (*)(TDB_DATA, TDB_DATA, void *), void *)
+tdb_set_logging_function: void (struct tdb_context *, const struct tdb_logging_context *)
+tdb_set_max_dead: void (struct tdb_context *, int)
+tdb_setalarm_sigptr: void (struct tdb_context *, volatile sig_atomic_t *)
+tdb_store: int (struct tdb_context *, TDB_DATA, TDB_DATA, int)
+tdb_summary: char *(struct tdb_context *)
+tdb_transaction_cancel: int (struct tdb_context *)
+tdb_transaction_commit: int (struct tdb_context *)
+tdb_transaction_prepare_commit: int (struct tdb_context *)
+tdb_transaction_start: int (struct tdb_context *)
+tdb_transaction_start_nonblock: int (struct tdb_context *)
+tdb_transaction_write_lock_mark: int (struct tdb_context *)
+tdb_transaction_write_lock_unmark: int (struct tdb_context *)
+tdb_traverse: int (struct tdb_context *, tdb_traverse_func, void *)
+tdb_traverse_read: int (struct tdb_context *, tdb_traverse_func, void *)
+tdb_unlock: int (struct tdb_context *, int, int)
+tdb_unlockall: int (struct tdb_context *)
+tdb_unlockall_read: int (struct tdb_context *)
+tdb_validate_freelist: int (struct tdb_context *, int *)
+tdb_wipe_all: int (struct tdb_context *)
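
(New relative to 1.2.10 in the list above is tdb_rescue, which as I understand it walks a possibly damaged database and hands each record it can recover to a caller-supplied callback. A sketch of a callback matching the listed signature; the counting and logging are illustrative.)

    /* Matches the listed shape: void (*)(TDB_DATA, TDB_DATA, void *).
     * Body is illustrative; assumes <stdio.h>. */
    static void rescue_one(TDB_DATA key, TDB_DATA data, void *private_data)
    {
        unsigned long *count = private_data;

        (*count)++;
        fprintf(stderr, "recovered: key %zu bytes, data %zu bytes\n",
                key.dsize, data.dsize);
    }

    /* Usage sketch: unsigned long n = 0; tdb_rescue(tdb, rescue_one, &n); */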
diff --git a/ABI/tdb-1.2.12.sigs b/ABI/tdb-1.2.12.sigs
new file mode 100644
index 0000000..d727f21
--- /dev/null
+++ b/ABI/tdb-1.2.12.sigs
@@ -0,0 +1,67 @@
+tdb_add_flags: void (struct tdb_context *, unsigned int)
+tdb_append: int (struct tdb_context *, TDB_DATA, TDB_DATA)
+tdb_chainlock: int (struct tdb_context *, TDB_DATA)
+tdb_chainlock_mark: int (struct tdb_context *, TDB_DATA)
+tdb_chainlock_nonblock: int (struct tdb_context *, TDB_DATA)
+tdb_chainlock_read: int (struct tdb_context *, TDB_DATA)
+tdb_chainlock_unmark: int (struct tdb_context *, TDB_DATA)
+tdb_chainunlock: int (struct tdb_context *, TDB_DATA)
+tdb_chainunlock_read: int (struct tdb_context *, TDB_DATA)
+tdb_check: int (struct tdb_context *, int (*)(TDB_DATA, TDB_DATA, void *), void *)
+tdb_close: int (struct tdb_context *)
+tdb_delete: int (struct tdb_context *, TDB_DATA)
+tdb_dump_all: void (struct tdb_context *)
+tdb_enable_seqnum: void (struct tdb_context *)
+tdb_error: enum TDB_ERROR (struct tdb_context *)
+tdb_errorstr: const char *(struct tdb_context *)
+tdb_exists: int (struct tdb_context *, TDB_DATA)
+tdb_fd: int (struct tdb_context *)
+tdb_fetch: TDB_DATA (struct tdb_context *, TDB_DATA)
+tdb_firstkey: TDB_DATA (struct tdb_context *)
+tdb_freelist_size: int (struct tdb_context *)
+tdb_get_flags: int (struct tdb_context *)
+tdb_get_logging_private: void *(struct tdb_context *)
+tdb_get_seqnum: int (struct tdb_context *)
+tdb_hash_size: int (struct tdb_context *)
+tdb_increment_seqnum_nonblock: void (struct tdb_context *)
+tdb_jenkins_hash: unsigned int (TDB_DATA *)
+tdb_lock_nonblock: int (struct tdb_context *, int, int)
+tdb_lockall: int (struct tdb_context *)
+tdb_lockall_mark: int (struct tdb_context *)
+tdb_lockall_nonblock: int (struct tdb_context *)
+tdb_lockall_read: int (struct tdb_context *)
+tdb_lockall_read_nonblock: int (struct tdb_context *)
+tdb_lockall_unmark: int (struct tdb_context *)
+tdb_log_fn: tdb_log_func (struct tdb_context *)
+tdb_map_size: size_t (struct tdb_context *)
+tdb_name: const char *(struct tdb_context *)
+tdb_nextkey: TDB_DATA (struct tdb_context *, TDB_DATA)
+tdb_null: dptr = 0xXXXX, dsize = 0
+tdb_open: struct tdb_context *(const char *, int, int, int, mode_t)
+tdb_open_ex: struct tdb_context *(const char *, int, int, int, mode_t, const struct tdb_logging_context *, tdb_hash_func)
+tdb_parse_record: int (struct tdb_context *, TDB_DATA, int (*)(TDB_DATA, TDB_DATA, void *), void *)
+tdb_printfreelist: int (struct tdb_context *)
+tdb_remove_flags: void (struct tdb_context *, unsigned int)
+tdb_reopen: int (struct tdb_context *)
+tdb_reopen_all: int (int)
+tdb_repack: int (struct tdb_context *)
+tdb_rescue: int (struct tdb_context *, void (*)(TDB_DATA, TDB_DATA, void *), void *)
+tdb_set_logging_function: void (struct tdb_context *, const struct tdb_logging_context *)
+tdb_set_max_dead: void (struct tdb_context *, int)
+tdb_setalarm_sigptr: void (struct tdb_context *, volatile sig_atomic_t *)
+tdb_store: int (struct tdb_context *, TDB_DATA, TDB_DATA, int)
+tdb_summary: char *(struct tdb_context *)
+tdb_transaction_cancel: int (struct tdb_context *)
+tdb_transaction_commit: int (struct tdb_context *)
+tdb_transaction_prepare_commit: int (struct tdb_context *)
+tdb_transaction_start: int (struct tdb_context *)
+tdb_transaction_start_nonblock: int (struct tdb_context *)
+tdb_transaction_write_lock_mark: int (struct tdb_context *)
+tdb_transaction_write_lock_unmark: int (struct tdb_context *)
+tdb_traverse: int (struct tdb_context *, tdb_traverse_func, void *)
+tdb_traverse_read: int (struct tdb_context *, tdb_traverse_func, void *)
+tdb_unlock: int (struct tdb_context *, int, int)
+tdb_unlockall: int (struct tdb_context *)
+tdb_unlockall_read: int (struct tdb_context *)
+tdb_validate_freelist: int (struct tdb_context *, int *)
+tdb_wipe_all: int (struct tdb_context *)
diff --git a/ABI/tdb-1.2.13.sigs b/ABI/tdb-1.2.13.sigs
new file mode 100644
index 0000000..d727f21
--- /dev/null
+++ b/ABI/tdb-1.2.13.sigs
@@ -0,0 +1,67 @@
+tdb_add_flags: void (struct tdb_context *, unsigned int)
+tdb_append: int (struct tdb_context *, TDB_DATA, TDB_DATA)
+tdb_chainlock: int (struct tdb_context *, TDB_DATA)
+tdb_chainlock_mark: int (struct tdb_context *, TDB_DATA)
+tdb_chainlock_nonblock: int (struct tdb_context *, TDB_DATA)
+tdb_chainlock_read: int (struct tdb_context *, TDB_DATA)
+tdb_chainlock_unmark: int (struct tdb_context *, TDB_DATA)
+tdb_chainunlock: int (struct tdb_context *, TDB_DATA)
+tdb_chainunlock_read: int (struct tdb_context *, TDB_DATA)
+tdb_check: int (struct tdb_context *, int (*)(TDB_DATA, TDB_DATA, void *), void *)
+tdb_close: int (struct tdb_context *)
+tdb_delete: int (struct tdb_context *, TDB_DATA)
+tdb_dump_all: void (struct tdb_context *)
+tdb_enable_seqnum: void (struct tdb_context *)
+tdb_error: enum TDB_ERROR (struct tdb_context *)
+tdb_errorstr: const char *(struct tdb_context *)
+tdb_exists: int (struct tdb_context *, TDB_DATA)
+tdb_fd: int (struct tdb_context *)
+tdb_fetch: TDB_DATA (struct tdb_context *, TDB_DATA)
+tdb_firstkey: TDB_DATA (struct tdb_context *)
+tdb_freelist_size: int (struct tdb_context *)
+tdb_get_flags: int (struct tdb_context *)
+tdb_get_logging_private: void *(struct tdb_context *)
+tdb_get_seqnum: int (struct tdb_context *)
+tdb_hash_size: int (struct tdb_context *)
+tdb_increment_seqnum_nonblock: void (struct tdb_context *)
+tdb_jenkins_hash: unsigned int (TDB_DATA *)
+tdb_lock_nonblock: int (struct tdb_context *, int, int)
+tdb_lockall: int (struct tdb_context *)
+tdb_lockall_mark: int (struct tdb_context *)
+tdb_lockall_nonblock: int (struct tdb_context *)
+tdb_lockall_read: int (struct tdb_context *)
+tdb_lockall_read_nonblock: int (struct tdb_context *)
+tdb_lockall_unmark: int (struct tdb_context *)
+tdb_log_fn: tdb_log_func (struct tdb_context *)
+tdb_map_size: size_t (struct tdb_context *)
+tdb_name: const char *(struct tdb_context *)
+tdb_nextkey: TDB_DATA (struct tdb_context *, TDB_DATA)
+tdb_null: dptr = 0xXXXX, dsize = 0
+tdb_open: struct tdb_context *(const char *, int, int, int, mode_t)
+tdb_open_ex: struct tdb_context *(const char *, int, int, int, mode_t, const struct tdb_logging_context *, tdb_hash_func)
+tdb_parse_record: int (struct tdb_context *, TDB_DATA, int (*)(TDB_DATA, TDB_DATA, void *), void *)
+tdb_printfreelist: int (struct tdb_context *)
+tdb_remove_flags: void (struct tdb_context *, unsigned int)
+tdb_reopen: int (struct tdb_context *)
+tdb_reopen_all: int (int)
+tdb_repack: int (struct tdb_context *)
+tdb_rescue: int (struct tdb_context *, void (*)(TDB_DATA, TDB_DATA, void *), void *)
+tdb_set_logging_function: void (struct tdb_context *, const struct tdb_logging_context *)
+tdb_set_max_dead: void (struct tdb_context *, int)
+tdb_setalarm_sigptr: void (struct tdb_context *, volatile sig_atomic_t *)
+tdb_store: int (struct tdb_context *, TDB_DATA, TDB_DATA, int)
+tdb_summary: char *(struct tdb_context *)
+tdb_transaction_cancel: int (struct tdb_context *)
+tdb_transaction_commit: int (struct tdb_context *)
+tdb_transaction_prepare_commit: int (struct tdb_context *)
+tdb_transaction_start: int (struct tdb_context *)
+tdb_transaction_start_nonblock: int (struct tdb_context *)
+tdb_transaction_write_lock_mark: int (struct tdb_context *)
+tdb_transaction_write_lock_unmark: int (struct tdb_context *)
+tdb_traverse: int (struct tdb_context *, tdb_traverse_func, void *)
+tdb_traverse_read: int (struct tdb_context *, tdb_traverse_func, void *)
+tdb_unlock: int (struct tdb_context *, int, int)
+tdb_unlockall: int (struct tdb_context *)
+tdb_unlockall_read: int (struct tdb_context *)
+tdb_validate_freelist: int (struct tdb_context *, int *)
+tdb_wipe_all: int (struct tdb_context *)
diff --git a/ABI/tdb-1.2.2.sigs b/ABI/tdb-1.2.2.sigs
new file mode 100644
index 0000000..043790d
--- /dev/null
+++ b/ABI/tdb-1.2.2.sigs
@@ -0,0 +1,60 @@
+tdb_add_flags: void (struct tdb_context *, unsigned int)
+tdb_append: int (struct tdb_context *, TDB_DATA, TDB_DATA)
+tdb_chainlock: int (struct tdb_context *, TDB_DATA)
+tdb_chainlock_mark: int (struct tdb_context *, TDB_DATA)
+tdb_chainlock_nonblock: int (struct tdb_context *, TDB_DATA)
+tdb_chainlock_read: int (struct tdb_context *, TDB_DATA)
+tdb_chainlock_unmark: int (struct tdb_context *, TDB_DATA)
+tdb_chainunlock: int (struct tdb_context *, TDB_DATA)
+tdb_chainunlock_read: int (struct tdb_context *, TDB_DATA)
+tdb_check: int (struct tdb_context *, int (*)(TDB_DATA, TDB_DATA, void *), void *)
+tdb_close: int (struct tdb_context *)
+tdb_delete: int (struct tdb_context *, TDB_DATA)
+tdb_dump_all: void (struct tdb_context *)
+tdb_enable_seqnum: void (struct tdb_context *)
+tdb_error: enum TDB_ERROR (struct tdb_context *)
+tdb_errorstr: const char *(struct tdb_context *)
+tdb_exists: int (struct tdb_context *, TDB_DATA)
+tdb_fd: int (struct tdb_context *)
+tdb_fetch: TDB_DATA (struct tdb_context *, TDB_DATA)
+tdb_firstkey: TDB_DATA (struct tdb_context *)
+tdb_freelist_size: int (struct tdb_context *)
+tdb_get_flags: int (struct tdb_context *)
+tdb_get_logging_private: void *(struct tdb_context *)
+tdb_get_seqnum: int (struct tdb_context *)
+tdb_hash_size: int (struct tdb_context *)
+tdb_increment_seqnum_nonblock: void (struct tdb_context *)
+tdb_lockall: int (struct tdb_context *)
+tdb_lockall_mark: int (struct tdb_context *)
+tdb_lockall_nonblock: int (struct tdb_context *)
+tdb_lockall_read: int (struct tdb_context *)
+tdb_lockall_read_nonblock: int (struct tdb_context *)
+tdb_lockall_unmark: int (struct tdb_context *)
+tdb_log_fn: tdb_log_func (struct tdb_context *)
+tdb_map_size: size_t (struct tdb_context *)
+tdb_name: const char *(struct tdb_context *)
+tdb_nextkey: TDB_DATA (struct tdb_context *, TDB_DATA)
+tdb_null: dptr = 0xXXXX, dsize = 0
+tdb_open: struct tdb_context *(const char *, int, int, int, mode_t)
+tdb_open_ex: struct tdb_context *(const char *, int, int, int, mode_t, const struct tdb_logging_context *, tdb_hash_func)
+tdb_parse_record: int (struct tdb_context *, TDB_DATA, int (*)(TDB_DATA, TDB_DATA, void *), void *)
+tdb_printfreelist: int (struct tdb_context *)
+tdb_remove_flags: void (struct tdb_context *, unsigned int)
+tdb_reopen: int (struct tdb_context *)
+tdb_reopen_all: int (int)
+tdb_repack: int (struct tdb_context *)
+tdb_set_logging_function: void (struct tdb_context *, const struct tdb_logging_context *)
+tdb_set_max_dead: void (struct tdb_context *, int)
+tdb_setalarm_sigptr: void (struct tdb_context *, volatile sig_atomic_t *)
+tdb_store: int (struct tdb_context *, TDB_DATA, TDB_DATA, int)
+tdb_transaction_cancel: int (struct tdb_context *)
+tdb_transaction_commit: int (struct tdb_context *)
+tdb_transaction_prepare_commit: int (struct tdb_context *)
+tdb_transaction_start: int (struct tdb_context *)
+tdb_transaction_start_nonblock: int (struct tdb_context *)
+tdb_traverse: int (struct tdb_context *, tdb_traverse_func, void *)
+tdb_traverse_read: int (struct tdb_context *, tdb_traverse_func, void *)
+tdb_unlockall: int (struct tdb_context *)
+tdb_unlockall_read: int (struct tdb_context *)
+tdb_validate_freelist: int (struct tdb_context *, int *)
+tdb_wipe_all: int (struct tdb_context *)
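
(The chain-lock calls in the 1.2.2 list above serialize access to the hash chain of a single key, which makes check-then-act sequences safe against concurrent writers. A hedged sketch; the helper name is invented, and TDB_INSERT is the store flag from include/tdb.h, not part of this list.)

    /* Store a value only if the key is absent, atomically with respect
     * to other processes; assumes an open struct tdb_context *tdb. */
    static int store_if_absent(struct tdb_context *tdb,
                               TDB_DATA key, TDB_DATA val)
    {
        int ret = 0;

        if (tdb_chainlock(tdb, key) != 0)   /* int (struct tdb_context *, TDB_DATA) */
            return -1;

        if (!tdb_exists(tdb, key))
            ret = tdb_store(tdb, key, val, TDB_INSERT);

        tdb_chainunlock(tdb, key);
        return ret;
    }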
diff --git a/ABI/tdb-1.2.3.sigs b/ABI/tdb-1.2.3.sigs
new file mode 100644
index 0000000..043790d
--- /dev/null
+++ b/ABI/tdb-1.2.3.sigs
@@ -0,0 +1,60 @@
+tdb_add_flags: void (struct tdb_context *, unsigned int)
+tdb_append: int (struct tdb_context *, TDB_DATA, TDB_DATA)
+tdb_chainlock: int (struct tdb_context *, TDB_DATA)
+tdb_chainlock_mark: int (struct tdb_context *, TDB_DATA)
+tdb_chainlock_nonblock: int (struct tdb_context *, TDB_DATA)
+tdb_chainlock_read: int (struct tdb_context *, TDB_DATA)
+tdb_chainlock_unmark: int (struct tdb_context *, TDB_DATA)
+tdb_chainunlock: int (struct tdb_context *, TDB_DATA)
+tdb_chainunlock_read: int (struct tdb_context *, TDB_DATA)
+tdb_check: int (struct tdb_context *, int (*)(TDB_DATA, TDB_DATA, void *), void *)
+tdb_close: int (struct tdb_context *)
+tdb_delete: int (struct tdb_context *, TDB_DATA)
+tdb_dump_all: void (struct tdb_context *)
+tdb_enable_seqnum: void (struct tdb_context *)
+tdb_error: enum TDB_ERROR (struct tdb_context *)
+tdb_errorstr: const char *(struct tdb_context *)
+tdb_exists: int (struct tdb_context *, TDB_DATA)
+tdb_fd: int (struct tdb_context *)
+tdb_fetch: TDB_DATA (struct tdb_context *, TDB_DATA)
+tdb_firstkey: TDB_DATA (struct tdb_context *)
+tdb_freelist_size: int (struct tdb_context *)
+tdb_get_flags: int (struct tdb_context *)
+tdb_get_logging_private: void *(struct tdb_context *)
+tdb_get_seqnum: int (struct tdb_context *)
+tdb_hash_size: int (struct tdb_context *)
+tdb_increment_seqnum_nonblock: void (struct tdb_context *)
+tdb_lockall: int (struct tdb_context *)
+tdb_lockall_mark: int (struct tdb_context *)
+tdb_lockall_nonblock: int (struct tdb_context *)
+tdb_lockall_read: int (struct tdb_context *)
+tdb_lockall_read_nonblock: int (struct tdb_context *)
+tdb_lockall_unmark: int (struct tdb_context *)
+tdb_log_fn: tdb_log_func (struct tdb_context *)
+tdb_map_size: size_t (struct tdb_context *)
+tdb_name: const char *(struct tdb_context *)
+tdb_nextkey: TDB_DATA (struct tdb_context *, TDB_DATA)
+tdb_null: dptr = 0xXXXX, dsize = 0
+tdb_open: struct tdb_context *(const char *, int, int, int, mode_t)
+tdb_open_ex: struct tdb_context *(const char *, int, int, int, mode_t, const struct tdb_logging_context *, tdb_hash_func)
+tdb_parse_record: int (struct tdb_context *, TDB_DATA, int (*)(TDB_DATA, TDB_DATA, void *), void *)
+tdb_printfreelist: int (struct tdb_context *)
+tdb_remove_flags: void (struct tdb_context *, unsigned int)
+tdb_reopen: int (struct tdb_context *)
+tdb_reopen_all: int (int)
+tdb_repack: int (struct tdb_context *)
+tdb_set_logging_function: void (struct tdb_context *, const struct tdb_logging_context *)
+tdb_set_max_dead: void (struct tdb_context *, int)
+tdb_setalarm_sigptr: void (struct tdb_context *, volatile sig_atomic_t *)
+tdb_store: int (struct tdb_context *, TDB_DATA, TDB_DATA, int)
+tdb_transaction_cancel: int (struct tdb_context *)
+tdb_transaction_commit: int (struct tdb_context *)
+tdb_transaction_prepare_commit: int (struct tdb_context *)
+tdb_transaction_start: int (struct tdb_context *)
+tdb_transaction_start_nonblock: int (struct tdb_context *)
+tdb_traverse: int (struct tdb_context *, tdb_traverse_func, void *)
+tdb_traverse_read: int (struct tdb_context *, tdb_traverse_func, void *)
+tdb_unlockall: int (struct tdb_context *)
+tdb_unlockall_read: int (struct tdb_context *)
+tdb_validate_freelist: int (struct tdb_context *, int *)
+tdb_wipe_all: int (struct tdb_context *)
diff --git a/ABI/tdb-1.2.4.sigs b/ABI/tdb-1.2.4.sigs
new file mode 100644
index 0000000..043790d
--- /dev/null
+++ b/ABI/tdb-1.2.4.sigs
@@ -0,0 +1,60 @@
+tdb_add_flags: void (struct tdb_context *, unsigned int)
+tdb_append: int (struct tdb_context *, TDB_DATA, TDB_DATA)
+tdb_chainlock: int (struct tdb_context *, TDB_DATA)
+tdb_chainlock_mark: int (struct tdb_context *, TDB_DATA)
+tdb_chainlock_nonblock: int (struct tdb_context *, TDB_DATA)
+tdb_chainlock_read: int (struct tdb_context *, TDB_DATA)
+tdb_chainlock_unmark: int (struct tdb_context *, TDB_DATA)
+tdb_chainunlock: int (struct tdb_context *, TDB_DATA)
+tdb_chainunlock_read: int (struct tdb_context *, TDB_DATA)
+tdb_check: int (struct tdb_context *, int (*)(TDB_DATA, TDB_DATA, void *), void *)
+tdb_close: int (struct tdb_context *)
+tdb_delete: int (struct tdb_context *, TDB_DATA)
+tdb_dump_all: void (struct tdb_context *)
+tdb_enable_seqnum: void (struct tdb_context *)
+tdb_error: enum TDB_ERROR (struct tdb_context *)
+tdb_errorstr: const char *(struct tdb_context *)
+tdb_exists: int (struct tdb_context *, TDB_DATA)
+tdb_fd: int (struct tdb_context *)
+tdb_fetch: TDB_DATA (struct tdb_context *, TDB_DATA)
+tdb_firstkey: TDB_DATA (struct tdb_context *)
+tdb_freelist_size: int (struct tdb_context *)
+tdb_get_flags: int (struct tdb_context *)
+tdb_get_logging_private: void *(struct tdb_context *)
+tdb_get_seqnum: int (struct tdb_context *)
+tdb_hash_size: int (struct tdb_context *)
+tdb_increment_seqnum_nonblock: void (struct tdb_context *)
+tdb_lockall: int (struct tdb_context *)
+tdb_lockall_mark: int (struct tdb_context *)
+tdb_lockall_nonblock: int (struct tdb_context *)
+tdb_lockall_read: int (struct tdb_context *)
+tdb_lockall_read_nonblock: int (struct tdb_context *)
+tdb_lockall_unmark: int (struct tdb_context *)
+tdb_log_fn: tdb_log_func (struct tdb_context *)
+tdb_map_size: size_t (struct tdb_context *)
+tdb_name: const char *(struct tdb_context *)
+tdb_nextkey: TDB_DATA (struct tdb_context *, TDB_DATA)
+tdb_null: dptr = 0xXXXX, dsize = 0
+tdb_open: struct tdb_context *(const char *, int, int, int, mode_t)
+tdb_open_ex: struct tdb_context *(const char *, int, int, int, mode_t, const struct tdb_logging_context *, tdb_hash_func)
+tdb_parse_record: int (struct tdb_context *, TDB_DATA, int (*)(TDB_DATA, TDB_DATA, void *), void *)
+tdb_printfreelist: int (struct tdb_context *)
+tdb_remove_flags: void (struct tdb_context *, unsigned int)
+tdb_reopen: int (struct tdb_context *)
+tdb_reopen_all: int (int)
+tdb_repack: int (struct tdb_context *)
+tdb_set_logging_function: void (struct tdb_context *, const struct tdb_logging_context *)
+tdb_set_max_dead: void (struct tdb_context *, int)
+tdb_setalarm_sigptr: void (struct tdb_context *, volatile sig_atomic_t *)
+tdb_store: int (struct tdb_context *, TDB_DATA, TDB_DATA, int)
+tdb_transaction_cancel: int (struct tdb_context *)
+tdb_transaction_commit: int (struct tdb_context *)
+tdb_transaction_prepare_commit: int (struct tdb_context *)
+tdb_transaction_start: int (struct tdb_context *)
+tdb_transaction_start_nonblock: int (struct tdb_context *)
+tdb_traverse: int (struct tdb_context *, tdb_traverse_func, void *)
+tdb_traverse_read: int (struct tdb_context *, tdb_traverse_func, void *)
+tdb_unlockall: int (struct tdb_context *)
+tdb_unlockall_read: int (struct tdb_context *)
+tdb_validate_freelist: int (struct tdb_context *, int *)
+tdb_wipe_all: int (struct tdb_context *)
diff --git a/ABI/tdb-1.2.5.sigs b/ABI/tdb-1.2.5.sigs
new file mode 100644
index 0000000..1e01f3b
--- /dev/null
+++ b/ABI/tdb-1.2.5.sigs
@@ -0,0 +1,61 @@
+tdb_add_flags: void (struct tdb_context *, unsigned int)
+tdb_append: int (struct tdb_context *, TDB_DATA, TDB_DATA)
+tdb_chainlock: int (struct tdb_context *, TDB_DATA)
+tdb_chainlock_mark: int (struct tdb_context *, TDB_DATA)
+tdb_chainlock_nonblock: int (struct tdb_context *, TDB_DATA)
+tdb_chainlock_read: int (struct tdb_context *, TDB_DATA)
+tdb_chainlock_unmark: int (struct tdb_context *, TDB_DATA)
+tdb_chainunlock: int (struct tdb_context *, TDB_DATA)
+tdb_chainunlock_read: int (struct tdb_context *, TDB_DATA)
+tdb_check: int (struct tdb_context *, int (*)(TDB_DATA, TDB_DATA, void *), void *)
+tdb_close: int (struct tdb_context *)
+tdb_delete: int (struct tdb_context *, TDB_DATA)
+tdb_dump_all: void (struct tdb_context *)
+tdb_enable_seqnum: void (struct tdb_context *)
+tdb_error: enum TDB_ERROR (struct tdb_context *)
+tdb_errorstr: const char *(struct tdb_context *)
+tdb_exists: int (struct tdb_context *, TDB_DATA)
+tdb_fd: int (struct tdb_context *)
+tdb_fetch: TDB_DATA (struct tdb_context *, TDB_DATA)
+tdb_firstkey: TDB_DATA (struct tdb_context *)
+tdb_freelist_size: int (struct tdb_context *)
+tdb_get_flags: int (struct tdb_context *)
+tdb_get_logging_private: void *(struct tdb_context *)
+tdb_get_seqnum: int (struct tdb_context *)
+tdb_hash_size: int (struct tdb_context *)
+tdb_increment_seqnum_nonblock: void (struct tdb_context *)
+tdb_jenkins_hash: unsigned int (TDB_DATA *)
+tdb_lockall: int (struct tdb_context *)
+tdb_lockall_mark: int (struct tdb_context *)
+tdb_lockall_nonblock: int (struct tdb_context *)
+tdb_lockall_read: int (struct tdb_context *)
+tdb_lockall_read_nonblock: int (struct tdb_context *)
+tdb_lockall_unmark: int (struct tdb_context *)
+tdb_log_fn: tdb_log_func (struct tdb_context *)
+tdb_map_size: size_t (struct tdb_context *)
+tdb_name: const char *(struct tdb_context *)
+tdb_nextkey: TDB_DATA (struct tdb_context *, TDB_DATA)
+tdb_null: dptr = 0xXXXX, dsize = 0
+tdb_open: struct tdb_context *(const char *, int, int, int, mode_t)
+tdb_open_ex: struct tdb_context *(const char *, int, int, int, mode_t, const struct tdb_logging_context *, tdb_hash_func)
+tdb_parse_record: int (struct tdb_context *, TDB_DATA, int (*)(TDB_DATA, TDB_DATA, void *), void *)
+tdb_printfreelist: int (struct tdb_context *)
+tdb_remove_flags: void (struct tdb_context *, unsigned int)
+tdb_reopen: int (struct tdb_context *)
+tdb_reopen_all: int (int)
+tdb_repack: int (struct tdb_context *)
+tdb_set_logging_function: void (struct tdb_context *, const struct tdb_logging_context *)
+tdb_set_max_dead: void (struct tdb_context *, int)
+tdb_setalarm_sigptr: void (struct tdb_context *, volatile sig_atomic_t *)
+tdb_store: int (struct tdb_context *, TDB_DATA, TDB_DATA, int)
+tdb_transaction_cancel: int (struct tdb_context *)
+tdb_transaction_commit: int (struct tdb_context *)
+tdb_transaction_prepare_commit: int (struct tdb_context *)
+tdb_transaction_start: int (struct tdb_context *)
+tdb_transaction_start_nonblock: int (struct tdb_context *)
+tdb_traverse: int (struct tdb_context *, tdb_traverse_func, void *)
+tdb_traverse_read: int (struct tdb_context *, tdb_traverse_func, void *)
+tdb_unlockall: int (struct tdb_context *)
+tdb_unlockall_read: int (struct tdb_context *)
+tdb_validate_freelist: int (struct tdb_context *, int *)
+tdb_wipe_all: int (struct tdb_context *)
diff --git a/ABI/tdb-1.2.6.sigs b/ABI/tdb-1.2.6.sigs
new file mode 100644
index 0000000..1e01f3b
--- /dev/null
+++ b/ABI/tdb-1.2.6.sigs
@@ -0,0 +1,61 @@
+tdb_add_flags: void (struct tdb_context *, unsigned int)
+tdb_append: int (struct tdb_context *, TDB_DATA, TDB_DATA)
+tdb_chainlock: int (struct tdb_context *, TDB_DATA)
+tdb_chainlock_mark: int (struct tdb_context *, TDB_DATA)
+tdb_chainlock_nonblock: int (struct tdb_context *, TDB_DATA)
+tdb_chainlock_read: int (struct tdb_context *, TDB_DATA)
+tdb_chainlock_unmark: int (struct tdb_context *, TDB_DATA)
+tdb_chainunlock: int (struct tdb_context *, TDB_DATA)
+tdb_chainunlock_read: int (struct tdb_context *, TDB_DATA)
+tdb_check: int (struct tdb_context *, int (*)(TDB_DATA, TDB_DATA, void *), void *)
+tdb_close: int (struct tdb_context *)
+tdb_delete: int (struct tdb_context *, TDB_DATA)
+tdb_dump_all: void (struct tdb_context *)
+tdb_enable_seqnum: void (struct tdb_context *)
+tdb_error: enum TDB_ERROR (struct tdb_context *)
+tdb_errorstr: const char *(struct tdb_context *)
+tdb_exists: int (struct tdb_context *, TDB_DATA)
+tdb_fd: int (struct tdb_context *)
+tdb_fetch: TDB_DATA (struct tdb_context *, TDB_DATA)
+tdb_firstkey: TDB_DATA (struct tdb_context *)
+tdb_freelist_size: int (struct tdb_context *)
+tdb_get_flags: int (struct tdb_context *)
+tdb_get_logging_private: void *(struct tdb_context *)
+tdb_get_seqnum: int (struct tdb_context *)
+tdb_hash_size: int (struct tdb_context *)
+tdb_increment_seqnum_nonblock: void (struct tdb_context *)
+tdb_jenkins_hash: unsigned int (TDB_DATA *)
+tdb_lockall: int (struct tdb_context *)
+tdb_lockall_mark: int (struct tdb_context *)
+tdb_lockall_nonblock: int (struct tdb_context *)
+tdb_lockall_read: int (struct tdb_context *)
+tdb_lockall_read_nonblock: int (struct tdb_context *)
+tdb_lockall_unmark: int (struct tdb_context *)
+tdb_log_fn: tdb_log_func (struct tdb_context *)
+tdb_map_size: size_t (struct tdb_context *)
+tdb_name: const char *(struct tdb_context *)
+tdb_nextkey: TDB_DATA (struct tdb_context *, TDB_DATA)
+tdb_null: dptr = 0xXXXX, dsize = 0
+tdb_open: struct tdb_context *(const char *, int, int, int, mode_t)
+tdb_open_ex: struct tdb_context *(const char *, int, int, int, mode_t, const struct tdb_logging_context *, tdb_hash_func)
+tdb_parse_record: int (struct tdb_context *, TDB_DATA, int (*)(TDB_DATA, TDB_DATA, void *), void *)
+tdb_printfreelist: int (struct tdb_context *)
+tdb_remove_flags: void (struct tdb_context *, unsigned int)
+tdb_reopen: int (struct tdb_context *)
+tdb_reopen_all: int (int)
+tdb_repack: int (struct tdb_context *)
+tdb_set_logging_function: void (struct tdb_context *, const struct tdb_logging_context *)
+tdb_set_max_dead: void (struct tdb_context *, int)
+tdb_setalarm_sigptr: void (struct tdb_context *, volatile sig_atomic_t *)
+tdb_store: int (struct tdb_context *, TDB_DATA, TDB_DATA, int)
+tdb_transaction_cancel: int (struct tdb_context *)
+tdb_transaction_commit: int (struct tdb_context *)
+tdb_transaction_prepare_commit: int (struct tdb_context *)
+tdb_transaction_start: int (struct tdb_context *)
+tdb_transaction_start_nonblock: int (struct tdb_context *)
+tdb_traverse: int (struct tdb_context *, tdb_traverse_func, void *)
+tdb_traverse_read: int (struct tdb_context *, tdb_traverse_func, void *)
+tdb_unlockall: int (struct tdb_context *)
+tdb_unlockall_read: int (struct tdb_context *)
+tdb_validate_freelist: int (struct tdb_context *, int *)
+tdb_wipe_all: int (struct tdb_context *)
diff --git a/ABI/tdb-1.2.7.sigs b/ABI/tdb-1.2.7.sigs
new file mode 100644
index 0000000..1e01f3b
--- /dev/null
+++ b/ABI/tdb-1.2.7.sigs
@@ -0,0 +1,61 @@
+tdb_add_flags: void (struct tdb_context *, unsigned int)
+tdb_append: int (struct tdb_context *, TDB_DATA, TDB_DATA)
+tdb_chainlock: int (struct tdb_context *, TDB_DATA)
+tdb_chainlock_mark: int (struct tdb_context *, TDB_DATA)
+tdb_chainlock_nonblock: int (struct tdb_context *, TDB_DATA)
+tdb_chainlock_read: int (struct tdb_context *, TDB_DATA)
+tdb_chainlock_unmark: int (struct tdb_context *, TDB_DATA)
+tdb_chainunlock: int (struct tdb_context *, TDB_DATA)
+tdb_chainunlock_read: int (struct tdb_context *, TDB_DATA)
+tdb_check: int (struct tdb_context *, int (*)(TDB_DATA, TDB_DATA, void *), void *)
+tdb_close: int (struct tdb_context *)
+tdb_delete: int (struct tdb_context *, TDB_DATA)
+tdb_dump_all: void (struct tdb_context *)
+tdb_enable_seqnum: void (struct tdb_context *)
+tdb_error: enum TDB_ERROR (struct tdb_context *)
+tdb_errorstr: const char *(struct tdb_context *)
+tdb_exists: int (struct tdb_context *, TDB_DATA)
+tdb_fd: int (struct tdb_context *)
+tdb_fetch: TDB_DATA (struct tdb_context *, TDB_DATA)
+tdb_firstkey: TDB_DATA (struct tdb_context *)
+tdb_freelist_size: int (struct tdb_context *)
+tdb_get_flags: int (struct tdb_context *)
+tdb_get_logging_private: void *(struct tdb_context *)
+tdb_get_seqnum: int (struct tdb_context *)
+tdb_hash_size: int (struct tdb_context *)
+tdb_increment_seqnum_nonblock: void (struct tdb_context *)
+tdb_jenkins_hash: unsigned int (TDB_DATA *)
+tdb_lockall: int (struct tdb_context *)
+tdb_lockall_mark: int (struct tdb_context *)
+tdb_lockall_nonblock: int (struct tdb_context *)
+tdb_lockall_read: int (struct tdb_context *)
+tdb_lockall_read_nonblock: int (struct tdb_context *)
+tdb_lockall_unmark: int (struct tdb_context *)
+tdb_log_fn: tdb_log_func (struct tdb_context *)
+tdb_map_size: size_t (struct tdb_context *)
+tdb_name: const char *(struct tdb_context *)
+tdb_nextkey: TDB_DATA (struct tdb_context *, TDB_DATA)
+tdb_null: dptr = 0xXXXX, dsize = 0
+tdb_open: struct tdb_context *(const char *, int, int, int, mode_t)
+tdb_open_ex: struct tdb_context *(const char *, int, int, int, mode_t, const struct tdb_logging_context *, tdb_hash_func)
+tdb_parse_record: int (struct tdb_context *, TDB_DATA, int (*)(TDB_DATA, TDB_DATA, void *), void *)
+tdb_printfreelist: int (struct tdb_context *)
+tdb_remove_flags: void (struct tdb_context *, unsigned int)
+tdb_reopen: int (struct tdb_context *)
+tdb_reopen_all: int (int)
+tdb_repack: int (struct tdb_context *)
+tdb_set_logging_function: void (struct tdb_context *, const struct tdb_logging_context *)
+tdb_set_max_dead: void (struct tdb_context *, int)
+tdb_setalarm_sigptr: void (struct tdb_context *, volatile sig_atomic_t *)
+tdb_store: int (struct tdb_context *, TDB_DATA, TDB_DATA, int)
+tdb_transaction_cancel: int (struct tdb_context *)
+tdb_transaction_commit: int (struct tdb_context *)
+tdb_transaction_prepare_commit: int (struct tdb_context *)
+tdb_transaction_start: int (struct tdb_context *)
+tdb_transaction_start_nonblock: int (struct tdb_context *)
+tdb_traverse: int (struct tdb_context *, tdb_traverse_func, void *)
+tdb_traverse_read: int (struct tdb_context *, tdb_traverse_func, void *)
+tdb_unlockall: int (struct tdb_context *)
+tdb_unlockall_read: int (struct tdb_context *)
+tdb_validate_freelist: int (struct tdb_context *, int *)
+tdb_wipe_all: int (struct tdb_context *)
diff --git a/ABI/tdb-1.2.8.sigs b/ABI/tdb-1.2.8.sigs
new file mode 100644
index 0000000..1e01f3b
--- /dev/null
+++ b/ABI/tdb-1.2.8.sigs
@@ -0,0 +1,61 @@
+tdb_add_flags: void (struct tdb_context *, unsigned int)
+tdb_append: int (struct tdb_context *, TDB_DATA, TDB_DATA)
+tdb_chainlock: int (struct tdb_context *, TDB_DATA)
+tdb_chainlock_mark: int (struct tdb_context *, TDB_DATA)
+tdb_chainlock_nonblock: int (struct tdb_context *, TDB_DATA)
+tdb_chainlock_read: int (struct tdb_context *, TDB_DATA)
+tdb_chainlock_unmark: int (struct tdb_context *, TDB_DATA)
+tdb_chainunlock: int (struct tdb_context *, TDB_DATA)
+tdb_chainunlock_read: int (struct tdb_context *, TDB_DATA)
+tdb_check: int (struct tdb_context *, int (*)(TDB_DATA, TDB_DATA, void *), void *)
+tdb_close: int (struct tdb_context *)
+tdb_delete: int (struct tdb_context *, TDB_DATA)
+tdb_dump_all: void (struct tdb_context *)
+tdb_enable_seqnum: void (struct tdb_context *)
+tdb_error: enum TDB_ERROR (struct tdb_context *)
+tdb_errorstr: const char *(struct tdb_context *)
+tdb_exists: int (struct tdb_context *, TDB_DATA)
+tdb_fd: int (struct tdb_context *)
+tdb_fetch: TDB_DATA (struct tdb_context *, TDB_DATA)
+tdb_firstkey: TDB_DATA (struct tdb_context *)
+tdb_freelist_size: int (struct tdb_context *)
+tdb_get_flags: int (struct tdb_context *)
+tdb_get_logging_private: void *(struct tdb_context *)
+tdb_get_seqnum: int (struct tdb_context *)
+tdb_hash_size: int (struct tdb_context *)
+tdb_increment_seqnum_nonblock: void (struct tdb_context *)
+tdb_jenkins_hash: unsigned int (TDB_DATA *)
+tdb_lockall: int (struct tdb_context *)
+tdb_lockall_mark: int (struct tdb_context *)
+tdb_lockall_nonblock: int (struct tdb_context *)
+tdb_lockall_read: int (struct tdb_context *)
+tdb_lockall_read_nonblock: int (struct tdb_context *)
+tdb_lockall_unmark: int (struct tdb_context *)
+tdb_log_fn: tdb_log_func (struct tdb_context *)
+tdb_map_size: size_t (struct tdb_context *)
+tdb_name: const char *(struct tdb_context *)
+tdb_nextkey: TDB_DATA (struct tdb_context *, TDB_DATA)
+tdb_null: dptr = 0xXXXX, dsize = 0
+tdb_open: struct tdb_context *(const char *, int, int, int, mode_t)
+tdb_open_ex: struct tdb_context *(const char *, int, int, int, mode_t, const struct tdb_logging_context *, tdb_hash_func)
+tdb_parse_record: int (struct tdb_context *, TDB_DATA, int (*)(TDB_DATA, TDB_DATA, void *), void *)
+tdb_printfreelist: int (struct tdb_context *)
+tdb_remove_flags: void (struct tdb_context *, unsigned int)
+tdb_reopen: int (struct tdb_context *)
+tdb_reopen_all: int (int)
+tdb_repack: int (struct tdb_context *)
+tdb_set_logging_function: void (struct tdb_context *, const struct tdb_logging_context *)
+tdb_set_max_dead: void (struct tdb_context *, int)
+tdb_setalarm_sigptr: void (struct tdb_context *, volatile sig_atomic_t *)
+tdb_store: int (struct tdb_context *, TDB_DATA, TDB_DATA, int)
+tdb_transaction_cancel: int (struct tdb_context *)
+tdb_transaction_commit: int (struct tdb_context *)
+tdb_transaction_prepare_commit: int (struct tdb_context *)
+tdb_transaction_start: int (struct tdb_context *)
+tdb_transaction_start_nonblock: int (struct tdb_context *)
+tdb_traverse: int (struct tdb_context *, tdb_traverse_func, void *)
+tdb_traverse_read: int (struct tdb_context *, tdb_traverse_func, void *)
+tdb_unlockall: int (struct tdb_context *)
+tdb_unlockall_read: int (struct tdb_context *)
+tdb_validate_freelist: int (struct tdb_context *, int *)
+tdb_wipe_all: int (struct tdb_context *)
diff --git a/ABI/tdb-1.2.9.sigs b/ABI/tdb-1.2.9.sigs
new file mode 100644
index 0000000..9e4149b
--- /dev/null
+++ b/ABI/tdb-1.2.9.sigs
@@ -0,0 +1,62 @@
+tdb_add_flags: void (struct tdb_context *, unsigned int)
+tdb_append: int (struct tdb_context *, TDB_DATA, TDB_DATA)
+tdb_chainlock: int (struct tdb_context *, TDB_DATA)
+tdb_chainlock_mark: int (struct tdb_context *, TDB_DATA)
+tdb_chainlock_nonblock: int (struct tdb_context *, TDB_DATA)
+tdb_chainlock_read: int (struct tdb_context *, TDB_DATA)
+tdb_chainlock_unmark: int (struct tdb_context *, TDB_DATA)
+tdb_chainunlock: int (struct tdb_context *, TDB_DATA)
+tdb_chainunlock_read: int (struct tdb_context *, TDB_DATA)
+tdb_check: int (struct tdb_context *, int (*)(TDB_DATA, TDB_DATA, void *), void *)
+tdb_close: int (struct tdb_context *)
+tdb_delete: int (struct tdb_context *, TDB_DATA)
+tdb_dump_all: void (struct tdb_context *)
+tdb_enable_seqnum: void (struct tdb_context *)
+tdb_error: enum TDB_ERROR (struct tdb_context *)
+tdb_errorstr: const char *(struct tdb_context *)
+tdb_exists: int (struct tdb_context *, TDB_DATA)
+tdb_fd: int (struct tdb_context *)
+tdb_fetch: TDB_DATA (struct tdb_context *, TDB_DATA)
+tdb_firstkey: TDB_DATA (struct tdb_context *)
+tdb_freelist_size: int (struct tdb_context *)
+tdb_get_flags: int (struct tdb_context *)
+tdb_get_logging_private: void *(struct tdb_context *)
+tdb_get_seqnum: int (struct tdb_context *)
+tdb_hash_size: int (struct tdb_context *)
+tdb_increment_seqnum_nonblock: void (struct tdb_context *)
+tdb_jenkins_hash: unsigned int (TDB_DATA *)
+tdb_lockall: int (struct tdb_context *)
+tdb_lockall_mark: int (struct tdb_context *)
+tdb_lockall_nonblock: int (struct tdb_context *)
+tdb_lockall_read: int (struct tdb_context *)
+tdb_lockall_read_nonblock: int (struct tdb_context *)
+tdb_lockall_unmark: int (struct tdb_context *)
+tdb_log_fn: tdb_log_func (struct tdb_context *)
+tdb_map_size: size_t (struct tdb_context *)
+tdb_name: const char *(struct tdb_context *)
+tdb_nextkey: TDB_DATA (struct tdb_context *, TDB_DATA)
+tdb_null: dptr = 0xXXXX, dsize = 0
+tdb_open: struct tdb_context *(const char *, int, int, int, mode_t)
+tdb_open_ex: struct tdb_context *(const char *, int, int, int, mode_t, const struct tdb_logging_context *, tdb_hash_func)
+tdb_parse_record: int (struct tdb_context *, TDB_DATA, int (*)(TDB_DATA, TDB_DATA, void *), void *)
+tdb_printfreelist: int (struct tdb_context *)
+tdb_remove_flags: void (struct tdb_context *, unsigned int)
+tdb_reopen: int (struct tdb_context *)
+tdb_reopen_all: int (int)
+tdb_repack: int (struct tdb_context *)
+tdb_set_logging_function: void (struct tdb_context *, const struct tdb_logging_context *)
+tdb_set_max_dead: void (struct tdb_context *, int)
+tdb_setalarm_sigptr: void (struct tdb_context *, volatile sig_atomic_t *)
+tdb_store: int (struct tdb_context *, TDB_DATA, TDB_DATA, int)
+tdb_summary: char *(struct tdb_context *)
+tdb_transaction_cancel: int (struct tdb_context *)
+tdb_transaction_commit: int (struct tdb_context *)
+tdb_transaction_prepare_commit: int (struct tdb_context *)
+tdb_transaction_start: int (struct tdb_context *)
+tdb_transaction_start_nonblock: int (struct tdb_context *)
+tdb_traverse: int (struct tdb_context *, tdb_traverse_func, void *)
+tdb_traverse_read: int (struct tdb_context *, tdb_traverse_func, void *)
+tdb_unlockall: int (struct tdb_context *)
+tdb_unlockall_read: int (struct tdb_context *)
+tdb_validate_freelist: int (struct tdb_context *, int *)
+tdb_wipe_all: int (struct tdb_context *)
diff --git a/ABI/tdb-1.3.0.sigs b/ABI/tdb-1.3.0.sigs
new file mode 100644
index 0000000..7d3e469
--- /dev/null
+++ b/ABI/tdb-1.3.0.sigs
@@ -0,0 +1,68 @@
+tdb_add_flags: void (struct tdb_context *, unsigned int)
+tdb_append: int (struct tdb_context *, TDB_DATA, TDB_DATA)
+tdb_chainlock: int (struct tdb_context *, TDB_DATA)
+tdb_chainlock_mark: int (struct tdb_context *, TDB_DATA)
+tdb_chainlock_nonblock: int (struct tdb_context *, TDB_DATA)
+tdb_chainlock_read: int (struct tdb_context *, TDB_DATA)
+tdb_chainlock_unmark: int (struct tdb_context *, TDB_DATA)
+tdb_chainunlock: int (struct tdb_context *, TDB_DATA)
+tdb_chainunlock_read: int (struct tdb_context *, TDB_DATA)
+tdb_check: int (struct tdb_context *, int (*)(TDB_DATA, TDB_DATA, void *), void *)
+tdb_close: int (struct tdb_context *)
+tdb_delete: int (struct tdb_context *, TDB_DATA)
+tdb_dump_all: void (struct tdb_context *)
+tdb_enable_seqnum: void (struct tdb_context *)
+tdb_error: enum TDB_ERROR (struct tdb_context *)
+tdb_errorstr: const char *(struct tdb_context *)
+tdb_exists: int (struct tdb_context *, TDB_DATA)
+tdb_fd: int (struct tdb_context *)
+tdb_fetch: TDB_DATA (struct tdb_context *, TDB_DATA)
+tdb_firstkey: TDB_DATA (struct tdb_context *)
+tdb_freelist_size: int (struct tdb_context *)
+tdb_get_flags: int (struct tdb_context *)
+tdb_get_logging_private: void *(struct tdb_context *)
+tdb_get_seqnum: int (struct tdb_context *)
+tdb_hash_size: int (struct tdb_context *)
+tdb_increment_seqnum_nonblock: void (struct tdb_context *)
+tdb_jenkins_hash: unsigned int (TDB_DATA *)
+tdb_lock_nonblock: int (struct tdb_context *, int, int)
+tdb_lockall: int (struct tdb_context *)
+tdb_lockall_mark: int (struct tdb_context *)
+tdb_lockall_nonblock: int (struct tdb_context *)
+tdb_lockall_read: int (struct tdb_context *)
+tdb_lockall_read_nonblock: int (struct tdb_context *)
+tdb_lockall_unmark: int (struct tdb_context *)
+tdb_log_fn: tdb_log_func (struct tdb_context *)
+tdb_map_size: size_t (struct tdb_context *)
+tdb_name: const char *(struct tdb_context *)
+tdb_nextkey: TDB_DATA (struct tdb_context *, TDB_DATA)
+tdb_null: dptr = 0xXXXX, dsize = 0
+tdb_open: struct tdb_context *(const char *, int, int, int, mode_t)
+tdb_open_ex: struct tdb_context *(const char *, int, int, int, mode_t, const struct tdb_logging_context *, tdb_hash_func)
+tdb_parse_record: int (struct tdb_context *, TDB_DATA, int (*)(TDB_DATA, TDB_DATA, void *), void *)
+tdb_printfreelist: int (struct tdb_context *)
+tdb_remove_flags: void (struct tdb_context *, unsigned int)
+tdb_reopen: int (struct tdb_context *)
+tdb_reopen_all: int (int)
+tdb_repack: int (struct tdb_context *)
+tdb_rescue: int (struct tdb_context *, void (*)(TDB_DATA, TDB_DATA, void *), void *)
+tdb_runtime_check_for_robust_mutexes: bool (void)
+tdb_set_logging_function: void (struct tdb_context *, const struct tdb_logging_context *)
+tdb_set_max_dead: void (struct tdb_context *, int)
+tdb_setalarm_sigptr: void (struct tdb_context *, volatile sig_atomic_t *)
+tdb_store: int (struct tdb_context *, TDB_DATA, TDB_DATA, int)
+tdb_summary: char *(struct tdb_context *)
+tdb_transaction_cancel: int (struct tdb_context *)
+tdb_transaction_commit: int (struct tdb_context *)
+tdb_transaction_prepare_commit: int (struct tdb_context *)
+tdb_transaction_start: int (struct tdb_context *)
+tdb_transaction_start_nonblock: int (struct tdb_context *)
+tdb_transaction_write_lock_mark: int (struct tdb_context *)
+tdb_transaction_write_lock_unmark: int (struct tdb_context *)
+tdb_traverse: int (struct tdb_context *, tdb_traverse_func, void *)
+tdb_traverse_read: int (struct tdb_context *, tdb_traverse_func, void *)
+tdb_unlock: int (struct tdb_context *, int, int)
+tdb_unlockall: int (struct tdb_context *)
+tdb_unlockall_read: int (struct tdb_context *)
+tdb_validate_freelist: int (struct tdb_context *, int *)
+tdb_wipe_all: int (struct tdb_context *)
diff --git a/ABI/tdb-1.3.1.sigs b/ABI/tdb-1.3.1.sigs
new file mode 100644
index 0000000..7d3e469
--- /dev/null
+++ b/ABI/tdb-1.3.1.sigs
@@ -0,0 +1,68 @@
+tdb_add_flags: void (struct tdb_context *, unsigned int)
+tdb_append: int (struct tdb_context *, TDB_DATA, TDB_DATA)
+tdb_chainlock: int (struct tdb_context *, TDB_DATA)
+tdb_chainlock_mark: int (struct tdb_context *, TDB_DATA)
+tdb_chainlock_nonblock: int (struct tdb_context *, TDB_DATA)
+tdb_chainlock_read: int (struct tdb_context *, TDB_DATA)
+tdb_chainlock_unmark: int (struct tdb_context *, TDB_DATA)
+tdb_chainunlock: int (struct tdb_context *, TDB_DATA)
+tdb_chainunlock_read: int (struct tdb_context *, TDB_DATA)
+tdb_check: int (struct tdb_context *, int (*)(TDB_DATA, TDB_DATA, void *), void *)
+tdb_close: int (struct tdb_context *)
+tdb_delete: int (struct tdb_context *, TDB_DATA)
+tdb_dump_all: void (struct tdb_context *)
+tdb_enable_seqnum: void (struct tdb_context *)
+tdb_error: enum TDB_ERROR (struct tdb_context *)
+tdb_errorstr: const char *(struct tdb_context *)
+tdb_exists: int (struct tdb_context *, TDB_DATA)
+tdb_fd: int (struct tdb_context *)
+tdb_fetch: TDB_DATA (struct tdb_context *, TDB_DATA)
+tdb_firstkey: TDB_DATA (struct tdb_context *)
+tdb_freelist_size: int (struct tdb_context *)
+tdb_get_flags: int (struct tdb_context *)
+tdb_get_logging_private: void *(struct tdb_context *)
+tdb_get_seqnum: int (struct tdb_context *)
+tdb_hash_size: int (struct tdb_context *)
+tdb_increment_seqnum_nonblock: void (struct tdb_context *)
+tdb_jenkins_hash: unsigned int (TDB_DATA *)
+tdb_lock_nonblock: int (struct tdb_context *, int, int)
+tdb_lockall: int (struct tdb_context *)
+tdb_lockall_mark: int (struct tdb_context *)
+tdb_lockall_nonblock: int (struct tdb_context *)
+tdb_lockall_read: int (struct tdb_context *)
+tdb_lockall_read_nonblock: int (struct tdb_context *)
+tdb_lockall_unmark: int (struct tdb_context *)
+tdb_log_fn: tdb_log_func (struct tdb_context *)
+tdb_map_size: size_t (struct tdb_context *)
+tdb_name: const char *(struct tdb_context *)
+tdb_nextkey: TDB_DATA (struct tdb_context *, TDB_DATA)
+tdb_null: dptr = 0xXXXX, dsize = 0
+tdb_open: struct tdb_context *(const char *, int, int, int, mode_t)
+tdb_open_ex: struct tdb_context *(const char *, int, int, int, mode_t, const struct tdb_logging_context *, tdb_hash_func)
+tdb_parse_record: int (struct tdb_context *, TDB_DATA, int (*)(TDB_DATA, TDB_DATA, void *), void *)
+tdb_printfreelist: int (struct tdb_context *)
+tdb_remove_flags: void (struct tdb_context *, unsigned int)
+tdb_reopen: int (struct tdb_context *)
+tdb_reopen_all: int (int)
+tdb_repack: int (struct tdb_context *)
+tdb_rescue: int (struct tdb_context *, void (*)(TDB_DATA, TDB_DATA, void *), void *)
+tdb_runtime_check_for_robust_mutexes: bool (void)
+tdb_set_logging_function: void (struct tdb_context *, const struct tdb_logging_context *)
+tdb_set_max_dead: void (struct tdb_context *, int)
+tdb_setalarm_sigptr: void (struct tdb_context *, volatile sig_atomic_t *)
+tdb_store: int (struct tdb_context *, TDB_DATA, TDB_DATA, int)
+tdb_summary: char *(struct tdb_context *)
+tdb_transaction_cancel: int (struct tdb_context *)
+tdb_transaction_commit: int (struct tdb_context *)
+tdb_transaction_prepare_commit: int (struct tdb_context *)
+tdb_transaction_start: int (struct tdb_context *)
+tdb_transaction_start_nonblock: int (struct tdb_context *)
+tdb_transaction_write_lock_mark: int (struct tdb_context *)
+tdb_transaction_write_lock_unmark: int (struct tdb_context *)
+tdb_traverse: int (struct tdb_context *, tdb_traverse_func, void *)
+tdb_traverse_read: int (struct tdb_context *, tdb_traverse_func, void *)
+tdb_unlock: int (struct tdb_context *, int, int)
+tdb_unlockall: int (struct tdb_context *)
+tdb_unlockall_read: int (struct tdb_context *)
+tdb_validate_freelist: int (struct tdb_context *, int *)
+tdb_wipe_all: int (struct tdb_context *)
diff --git a/ABI/tdb-1.3.2.sigs b/ABI/tdb-1.3.2.sigs
new file mode 100644
index 0000000..7d3e469
--- /dev/null
+++ b/ABI/tdb-1.3.2.sigs
@@ -0,0 +1,68 @@
+tdb_add_flags: void (struct tdb_context *, unsigned int)
+tdb_append: int (struct tdb_context *, TDB_DATA, TDB_DATA)
+tdb_chainlock: int (struct tdb_context *, TDB_DATA)
+tdb_chainlock_mark: int (struct tdb_context *, TDB_DATA)
+tdb_chainlock_nonblock: int (struct tdb_context *, TDB_DATA)
+tdb_chainlock_read: int (struct tdb_context *, TDB_DATA)
+tdb_chainlock_unmark: int (struct tdb_context *, TDB_DATA)
+tdb_chainunlock: int (struct tdb_context *, TDB_DATA)
+tdb_chainunlock_read: int (struct tdb_context *, TDB_DATA)
+tdb_check: int (struct tdb_context *, int (*)(TDB_DATA, TDB_DATA, void *), void *)
+tdb_close: int (struct tdb_context *)
+tdb_delete: int (struct tdb_context *, TDB_DATA)
+tdb_dump_all: void (struct tdb_context *)
+tdb_enable_seqnum: void (struct tdb_context *)
+tdb_error: enum TDB_ERROR (struct tdb_context *)
+tdb_errorstr: const char *(struct tdb_context *)
+tdb_exists: int (struct tdb_context *, TDB_DATA)
+tdb_fd: int (struct tdb_context *)
+tdb_fetch: TDB_DATA (struct tdb_context *, TDB_DATA)
+tdb_firstkey: TDB_DATA (struct tdb_context *)
+tdb_freelist_size: int (struct tdb_context *)
+tdb_get_flags: int (struct tdb_context *)
+tdb_get_logging_private: void *(struct tdb_context *)
+tdb_get_seqnum: int (struct tdb_context *)
+tdb_hash_size: int (struct tdb_context *)
+tdb_increment_seqnum_nonblock: void (struct tdb_context *)
+tdb_jenkins_hash: unsigned int (TDB_DATA *)
+tdb_lock_nonblock: int (struct tdb_context *, int, int)
+tdb_lockall: int (struct tdb_context *)
+tdb_lockall_mark: int (struct tdb_context *)
+tdb_lockall_nonblock: int (struct tdb_context *)
+tdb_lockall_read: int (struct tdb_context *)
+tdb_lockall_read_nonblock: int (struct tdb_context *)
+tdb_lockall_unmark: int (struct tdb_context *)
+tdb_log_fn: tdb_log_func (struct tdb_context *)
+tdb_map_size: size_t (struct tdb_context *)
+tdb_name: const char *(struct tdb_context *)
+tdb_nextkey: TDB_DATA (struct tdb_context *, TDB_DATA)
+tdb_null: dptr = 0xXXXX, dsize = 0
+tdb_open: struct tdb_context *(const char *, int, int, int, mode_t)
+tdb_open_ex: struct tdb_context *(const char *, int, int, int, mode_t, const struct tdb_logging_context *, tdb_hash_func)
+tdb_parse_record: int (struct tdb_context *, TDB_DATA, int (*)(TDB_DATA, TDB_DATA, void *), void *)
+tdb_printfreelist: int (struct tdb_context *)
+tdb_remove_flags: void (struct tdb_context *, unsigned int)
+tdb_reopen: int (struct tdb_context *)
+tdb_reopen_all: int (int)
+tdb_repack: int (struct tdb_context *)
+tdb_rescue: int (struct tdb_context *, void (*)(TDB_DATA, TDB_DATA, void *), void *)
+tdb_runtime_check_for_robust_mutexes: bool (void)
+tdb_set_logging_function: void (struct tdb_context *, const struct tdb_logging_context *)
+tdb_set_max_dead: void (struct tdb_context *, int)
+tdb_setalarm_sigptr: void (struct tdb_context *, volatile sig_atomic_t *)
+tdb_store: int (struct tdb_context *, TDB_DATA, TDB_DATA, int)
+tdb_summary: char *(struct tdb_context *)
+tdb_transaction_cancel: int (struct tdb_context *)
+tdb_transaction_commit: int (struct tdb_context *)
+tdb_transaction_prepare_commit: int (struct tdb_context *)
+tdb_transaction_start: int (struct tdb_context *)
+tdb_transaction_start_nonblock: int (struct tdb_context *)
+tdb_transaction_write_lock_mark: int (struct tdb_context *)
+tdb_transaction_write_lock_unmark: int (struct tdb_context *)
+tdb_traverse: int (struct tdb_context *, tdb_traverse_func, void *)
+tdb_traverse_read: int (struct tdb_context *, tdb_traverse_func, void *)
+tdb_unlock: int (struct tdb_context *, int, int)
+tdb_unlockall: int (struct tdb_context *)
+tdb_unlockall_read: int (struct tdb_context *)
+tdb_validate_freelist: int (struct tdb_context *, int *)
+tdb_wipe_all: int (struct tdb_context *)
diff --git a/ABI/tdb-1.3.3.sigs b/ABI/tdb-1.3.3.sigs
new file mode 100644
index 0000000..7d3e469
--- /dev/null
+++ b/ABI/tdb-1.3.3.sigs
@@ -0,0 +1,68 @@
+tdb_add_flags: void (struct tdb_context *, unsigned int)
+tdb_append: int (struct tdb_context *, TDB_DATA, TDB_DATA)
+tdb_chainlock: int (struct tdb_context *, TDB_DATA)
+tdb_chainlock_mark: int (struct tdb_context *, TDB_DATA)
+tdb_chainlock_nonblock: int (struct tdb_context *, TDB_DATA)
+tdb_chainlock_read: int (struct tdb_context *, TDB_DATA)
+tdb_chainlock_unmark: int (struct tdb_context *, TDB_DATA)
+tdb_chainunlock: int (struct tdb_context *, TDB_DATA)
+tdb_chainunlock_read: int (struct tdb_context *, TDB_DATA)
+tdb_check: int (struct tdb_context *, int (*)(TDB_DATA, TDB_DATA, void *), void *)
+tdb_close: int (struct tdb_context *)
+tdb_delete: int (struct tdb_context *, TDB_DATA)
+tdb_dump_all: void (struct tdb_context *)
+tdb_enable_seqnum: void (struct tdb_context *)
+tdb_error: enum TDB_ERROR (struct tdb_context *)
+tdb_errorstr: const char *(struct tdb_context *)
+tdb_exists: int (struct tdb_context *, TDB_DATA)
+tdb_fd: int (struct tdb_context *)
+tdb_fetch: TDB_DATA (struct tdb_context *, TDB_DATA)
+tdb_firstkey: TDB_DATA (struct tdb_context *)
+tdb_freelist_size: int (struct tdb_context *)
+tdb_get_flags: int (struct tdb_context *)
+tdb_get_logging_private: void *(struct tdb_context *)
+tdb_get_seqnum: int (struct tdb_context *)
+tdb_hash_size: int (struct tdb_context *)
+tdb_increment_seqnum_nonblock: void (struct tdb_context *)
+tdb_jenkins_hash: unsigned int (TDB_DATA *)
+tdb_lock_nonblock: int (struct tdb_context *, int, int)
+tdb_lockall: int (struct tdb_context *)
+tdb_lockall_mark: int (struct tdb_context *)
+tdb_lockall_nonblock: int (struct tdb_context *)
+tdb_lockall_read: int (struct tdb_context *)
+tdb_lockall_read_nonblock: int (struct tdb_context *)
+tdb_lockall_unmark: int (struct tdb_context *)
+tdb_log_fn: tdb_log_func (struct tdb_context *)
+tdb_map_size: size_t (struct tdb_context *)
+tdb_name: const char *(struct tdb_context *)
+tdb_nextkey: TDB_DATA (struct tdb_context *, TDB_DATA)
+tdb_null: dptr = 0xXXXX, dsize = 0
+tdb_open: struct tdb_context *(const char *, int, int, int, mode_t)
+tdb_open_ex: struct tdb_context *(const char *, int, int, int, mode_t, const struct tdb_logging_context *, tdb_hash_func)
+tdb_parse_record: int (struct tdb_context *, TDB_DATA, int (*)(TDB_DATA, TDB_DATA, void *), void *)
+tdb_printfreelist: int (struct tdb_context *)
+tdb_remove_flags: void (struct tdb_context *, unsigned int)
+tdb_reopen: int (struct tdb_context *)
+tdb_reopen_all: int (int)
+tdb_repack: int (struct tdb_context *)
+tdb_rescue: int (struct tdb_context *, void (*)(TDB_DATA, TDB_DATA, void *), void *)
+tdb_runtime_check_for_robust_mutexes: bool (void)
+tdb_set_logging_function: void (struct tdb_context *, const struct tdb_logging_context *)
+tdb_set_max_dead: void (struct tdb_context *, int)
+tdb_setalarm_sigptr: void (struct tdb_context *, volatile sig_atomic_t *)
+tdb_store: int (struct tdb_context *, TDB_DATA, TDB_DATA, int)
+tdb_summary: char *(struct tdb_context *)
+tdb_transaction_cancel: int (struct tdb_context *)
+tdb_transaction_commit: int (struct tdb_context *)
+tdb_transaction_prepare_commit: int (struct tdb_context *)
+tdb_transaction_start: int (struct tdb_context *)
+tdb_transaction_start_nonblock: int (struct tdb_context *)
+tdb_transaction_write_lock_mark: int (struct tdb_context *)
+tdb_transaction_write_lock_unmark: int (struct tdb_context *)
+tdb_traverse: int (struct tdb_context *, tdb_traverse_func, void *)
+tdb_traverse_read: int (struct tdb_context *, tdb_traverse_func, void *)
+tdb_unlock: int (struct tdb_context *, int, int)
+tdb_unlockall: int (struct tdb_context *)
+tdb_unlockall_read: int (struct tdb_context *)
+tdb_validate_freelist: int (struct tdb_context *, int *)
+tdb_wipe_all: int (struct tdb_context *)
diff --git a/ABI/tdb-1.3.4.sigs b/ABI/tdb-1.3.4.sigs
new file mode 100644
index 0000000..7d3e469
--- /dev/null
+++ b/ABI/tdb-1.3.4.sigs
@@ -0,0 +1,68 @@
+tdb_add_flags: void (struct tdb_context *, unsigned int)
+tdb_append: int (struct tdb_context *, TDB_DATA, TDB_DATA)
+tdb_chainlock: int (struct tdb_context *, TDB_DATA)
+tdb_chainlock_mark: int (struct tdb_context *, TDB_DATA)
+tdb_chainlock_nonblock: int (struct tdb_context *, TDB_DATA)
+tdb_chainlock_read: int (struct tdb_context *, TDB_DATA)
+tdb_chainlock_unmark: int (struct tdb_context *, TDB_DATA)
+tdb_chainunlock: int (struct tdb_context *, TDB_DATA)
+tdb_chainunlock_read: int (struct tdb_context *, TDB_DATA)
+tdb_check: int (struct tdb_context *, int (*)(TDB_DATA, TDB_DATA, void *), void *)
+tdb_close: int (struct tdb_context *)
+tdb_delete: int (struct tdb_context *, TDB_DATA)
+tdb_dump_all: void (struct tdb_context *)
+tdb_enable_seqnum: void (struct tdb_context *)
+tdb_error: enum TDB_ERROR (struct tdb_context *)
+tdb_errorstr: const char *(struct tdb_context *)
+tdb_exists: int (struct tdb_context *, TDB_DATA)
+tdb_fd: int (struct tdb_context *)
+tdb_fetch: TDB_DATA (struct tdb_context *, TDB_DATA)
+tdb_firstkey: TDB_DATA (struct tdb_context *)
+tdb_freelist_size: int (struct tdb_context *)
+tdb_get_flags: int (struct tdb_context *)
+tdb_get_logging_private: void *(struct tdb_context *)
+tdb_get_seqnum: int (struct tdb_context *)
+tdb_hash_size: int (struct tdb_context *)
+tdb_increment_seqnum_nonblock: void (struct tdb_context *)
+tdb_jenkins_hash: unsigned int (TDB_DATA *)
+tdb_lock_nonblock: int (struct tdb_context *, int, int)
+tdb_lockall: int (struct tdb_context *)
+tdb_lockall_mark: int (struct tdb_context *)
+tdb_lockall_nonblock: int (struct tdb_context *)
+tdb_lockall_read: int (struct tdb_context *)
+tdb_lockall_read_nonblock: int (struct tdb_context *)
+tdb_lockall_unmark: int (struct tdb_context *)
+tdb_log_fn: tdb_log_func (struct tdb_context *)
+tdb_map_size: size_t (struct tdb_context *)
+tdb_name: const char *(struct tdb_context *)
+tdb_nextkey: TDB_DATA (struct tdb_context *, TDB_DATA)
+tdb_null: dptr = 0xXXXX, dsize = 0
+tdb_open: struct tdb_context *(const char *, int, int, int, mode_t)
+tdb_open_ex: struct tdb_context *(const char *, int, int, int, mode_t, const struct tdb_logging_context *, tdb_hash_func)
+tdb_parse_record: int (struct tdb_context *, TDB_DATA, int (*)(TDB_DATA, TDB_DATA, void *), void *)
+tdb_printfreelist: int (struct tdb_context *)
+tdb_remove_flags: void (struct tdb_context *, unsigned int)
+tdb_reopen: int (struct tdb_context *)
+tdb_reopen_all: int (int)
+tdb_repack: int (struct tdb_context *)
+tdb_rescue: int (struct tdb_context *, void (*)(TDB_DATA, TDB_DATA, void *), void *)
+tdb_runtime_check_for_robust_mutexes: bool (void)
+tdb_set_logging_function: void (struct tdb_context *, const struct tdb_logging_context *)
+tdb_set_max_dead: void (struct tdb_context *, int)
+tdb_setalarm_sigptr: void (struct tdb_context *, volatile sig_atomic_t *)
+tdb_store: int (struct tdb_context *, TDB_DATA, TDB_DATA, int)
+tdb_summary: char *(struct tdb_context *)
+tdb_transaction_cancel: int (struct tdb_context *)
+tdb_transaction_commit: int (struct tdb_context *)
+tdb_transaction_prepare_commit: int (struct tdb_context *)
+tdb_transaction_start: int (struct tdb_context *)
+tdb_transaction_start_nonblock: int (struct tdb_context *)
+tdb_transaction_write_lock_mark: int (struct tdb_context *)
+tdb_transaction_write_lock_unmark: int (struct tdb_context *)
+tdb_traverse: int (struct tdb_context *, tdb_traverse_func, void *)
+tdb_traverse_read: int (struct tdb_context *, tdb_traverse_func, void *)
+tdb_unlock: int (struct tdb_context *, int, int)
+tdb_unlockall: int (struct tdb_context *)
+tdb_unlockall_read: int (struct tdb_context *)
+tdb_validate_freelist: int (struct tdb_context *, int *)
+tdb_wipe_all: int (struct tdb_context *)
diff --git a/ABI/tdb-1.3.5.sigs b/ABI/tdb-1.3.5.sigs
new file mode 100644
index 0000000..2545c99
--- /dev/null
+++ b/ABI/tdb-1.3.5.sigs
@@ -0,0 +1,69 @@
+tdb_add_flags: void (struct tdb_context *, unsigned int)
+tdb_append: int (struct tdb_context *, TDB_DATA, TDB_DATA)
+tdb_chainlock: int (struct tdb_context *, TDB_DATA)
+tdb_chainlock_mark: int (struct tdb_context *, TDB_DATA)
+tdb_chainlock_nonblock: int (struct tdb_context *, TDB_DATA)
+tdb_chainlock_read: int (struct tdb_context *, TDB_DATA)
+tdb_chainlock_read_nonblock: int (struct tdb_context *, TDB_DATA)
+tdb_chainlock_unmark: int (struct tdb_context *, TDB_DATA)
+tdb_chainunlock: int (struct tdb_context *, TDB_DATA)
+tdb_chainunlock_read: int (struct tdb_context *, TDB_DATA)
+tdb_check: int (struct tdb_context *, int (*)(TDB_DATA, TDB_DATA, void *), void *)
+tdb_close: int (struct tdb_context *)
+tdb_delete: int (struct tdb_context *, TDB_DATA)
+tdb_dump_all: void (struct tdb_context *)
+tdb_enable_seqnum: void (struct tdb_context *)
+tdb_error: enum TDB_ERROR (struct tdb_context *)
+tdb_errorstr: const char *(struct tdb_context *)
+tdb_exists: int (struct tdb_context *, TDB_DATA)
+tdb_fd: int (struct tdb_context *)
+tdb_fetch: TDB_DATA (struct tdb_context *, TDB_DATA)
+tdb_firstkey: TDB_DATA (struct tdb_context *)
+tdb_freelist_size: int (struct tdb_context *)
+tdb_get_flags: int (struct tdb_context *)
+tdb_get_logging_private: void *(struct tdb_context *)
+tdb_get_seqnum: int (struct tdb_context *)
+tdb_hash_size: int (struct tdb_context *)
+tdb_increment_seqnum_nonblock: void (struct tdb_context *)
+tdb_jenkins_hash: unsigned int (TDB_DATA *)
+tdb_lock_nonblock: int (struct tdb_context *, int, int)
+tdb_lockall: int (struct tdb_context *)
+tdb_lockall_mark: int (struct tdb_context *)
+tdb_lockall_nonblock: int (struct tdb_context *)
+tdb_lockall_read: int (struct tdb_context *)
+tdb_lockall_read_nonblock: int (struct tdb_context *)
+tdb_lockall_unmark: int (struct tdb_context *)
+tdb_log_fn: tdb_log_func (struct tdb_context *)
+tdb_map_size: size_t (struct tdb_context *)
+tdb_name: const char *(struct tdb_context *)
+tdb_nextkey: TDB_DATA (struct tdb_context *, TDB_DATA)
+tdb_null: dptr = 0xXXXX, dsize = 0
+tdb_open: struct tdb_context *(const char *, int, int, int, mode_t)
+tdb_open_ex: struct tdb_context *(const char *, int, int, int, mode_t, const struct tdb_logging_context *, tdb_hash_func)
+tdb_parse_record: int (struct tdb_context *, TDB_DATA, int (*)(TDB_DATA, TDB_DATA, void *), void *)
+tdb_printfreelist: int (struct tdb_context *)
+tdb_remove_flags: void (struct tdb_context *, unsigned int)
+tdb_reopen: int (struct tdb_context *)
+tdb_reopen_all: int (int)
+tdb_repack: int (struct tdb_context *)
+tdb_rescue: int (struct tdb_context *, void (*)(TDB_DATA, TDB_DATA, void *), void *)
+tdb_runtime_check_for_robust_mutexes: bool (void)
+tdb_set_logging_function: void (struct tdb_context *, const struct tdb_logging_context *)
+tdb_set_max_dead: void (struct tdb_context *, int)
+tdb_setalarm_sigptr: void (struct tdb_context *, volatile sig_atomic_t *)
+tdb_store: int (struct tdb_context *, TDB_DATA, TDB_DATA, int)
+tdb_summary: char *(struct tdb_context *)
+tdb_transaction_cancel: int (struct tdb_context *)
+tdb_transaction_commit: int (struct tdb_context *)
+tdb_transaction_prepare_commit: int (struct tdb_context *)
+tdb_transaction_start: int (struct tdb_context *)
+tdb_transaction_start_nonblock: int (struct tdb_context *)
+tdb_transaction_write_lock_mark: int (struct tdb_context *)
+tdb_transaction_write_lock_unmark: int (struct tdb_context *)
+tdb_traverse: int (struct tdb_context *, tdb_traverse_func, void *)
+tdb_traverse_read: int (struct tdb_context *, tdb_traverse_func, void *)
+tdb_unlock: int (struct tdb_context *, int, int)
+tdb_unlockall: int (struct tdb_context *)
+tdb_unlockall_read: int (struct tdb_context *)
+tdb_validate_freelist: int (struct tdb_context *, int *)
+tdb_wipe_all: int (struct tdb_context *)
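Each .sigs entry pairs an exported symbol with its type as "name: return-type (argument-types)". For example, the tdb_store entry above corresponds to this C declaration (parameter names added here for illustration; only the types are part of the recorded signature):

    int tdb_store(struct tdb_context *tdb, TDB_DATA key, TDB_DATA dbuf, int flag);

The 1.3.5 file is one entry longer than 1.3.4 (69 lines rather than 68) because it adds tdb_chainlock_read_nonblock.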
diff --git a/Makefile b/Makefile
new file mode 100644
index 0000000..fe44ff6
--- /dev/null
+++ b/Makefile
@@ -0,0 +1,66 @@
+# simple makefile wrapper to run waf
+
+WAF=WAF_MAKE=1 PATH=buildtools/bin:../../buildtools/bin:$$PATH waf
+
+all:
+ $(WAF) build
+
+install:
+ $(WAF) install
+
+uninstall:
+ $(WAF) uninstall
+
+test: FORCE
+ $(WAF) test $(TEST_OPTIONS)
+
+testenv:
+ $(WAF) test --testenv $(TEST_OPTIONS)
+
+quicktest:
+ $(WAF) test --quick $(TEST_OPTIONS)
+
+dist:
+ touch .tmplock
+ WAFLOCK=.tmplock $(WAF) dist
+
+distcheck:
+ touch .tmplock
+ WAFLOCK=.tmplock $(WAF) distcheck
+
+clean:
+ $(WAF) clean
+
+distclean:
+ $(WAF) distclean
+
+reconfigure: configure
+ $(WAF) reconfigure
+
+show_waf_options:
+ $(WAF) --help
+
+# some compatibility make targets
+everything: all
+
+testsuite: all
+
+check: test
+
+torture: all
+
+# this should do an install as well, once install is finished
+installcheck: test
+
+etags:
+ $(WAF) etags
+
+ctags:
+ $(WAF) ctags
+
+pydoctor:
+ $(WAF) pydoctor
+
+bin/%:: FORCE
+ $(WAF) --targets=`basename $@`
+FORCE:
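Some example invocations of this wrapper (the bin/%:: pattern rule forwards a single binary target to waf via --targets):

    make
    make quicktest
    make bin/tdbtool    # equivalent to: waf --targets=tdbtool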
diff --git a/buildtools/README b/buildtools/README
new file mode 100644
index 0000000..eab0382
--- /dev/null
+++ b/buildtools/README
@@ -0,0 +1,12 @@
+See http://code.google.com/p/waf/ for more information on waf
+
+You can get an svn copy of the upstream source with:
+
+ svn checkout http://waf.googlecode.com/svn/trunk/ waf-read-only
+
+Samba currently uses waf 1.5, which can be found at:
+
+ http://waf.googlecode.com/svn/branches/waf-1.5
+
+To update the current copy of waf, use the update-waf.sh script in this
+directory.
diff --git a/buildtools/bin/waf b/buildtools/bin/waf
new file mode 100755
index 0000000..1b0f466
--- /dev/null
+++ b/buildtools/bin/waf
@@ -0,0 +1,77 @@
+#!/usr/bin/env python
+# encoding: ISO-8859-1
+# Thomas Nagy, 2005-2010
+
+"""
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions
+are met:
+
+1. Redistributions of source code must retain the above copyright
+ notice, this list of conditions and the following disclaimer.
+
+2. Redistributions in binary form must reproduce the above copyright
+ notice, this list of conditions and the following disclaimer in the
+ documentation and/or other materials provided with the distribution.
+
+3. The name of the author may not be used to endorse or promote products
+ derived from this software without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE AUTHOR "AS IS" AND ANY EXPRESS OR
+IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+DISCLAIMED. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT,
+INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
+STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
+IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+POSSIBILITY OF SUCH DAMAGE.
+"""
+
+import os, sys
+if sys.hexversion<0x203000f: raise ImportError("Waf requires Python >= 2.3")
+
+if 'PSYCOWAF' in os.environ:
+ try:import psyco;psyco.full()
+ except:pass
+
+VERSION="1.5.19"
+REVISION="x"
+INSTALL="x"
+C1='x'
+C2='x'
+cwd = os.getcwd()
+join = os.path.join
+
+WAF='waf'
+def b(x):
+ return x
+
+if sys.hexversion>0x300000f:
+ WAF='waf3'
+ def b(x):
+ return x.encode()
+
+def err(m):
+ print(('\033[91mError: %s\033[0m' % m))
+ sys.exit(1)
+
+def test(dir):
+ try: os.stat(join(dir, 'wafadmin')); return os.path.abspath(dir)
+ except OSError: pass
+
+def find_lib():
+ return os.path.abspath(os.path.join(os.path.dirname(__file__), '../../third_party/waf'))
+
+wafdir = find_lib()
+w = join(wafdir, 'wafadmin')
+t = join(w, 'Tools')
+f = join(w, '3rdparty')
+sys.path = [w, t, f] + sys.path
+
+if __name__ == '__main__':
+ import Scripting
+ Scripting.prepare(t, cwd, VERSION, wafdir)
+
diff --git a/buildtools/compare_config_h4.sh b/buildtools/compare_config_h4.sh
new file mode 100755
index 0000000..b78b36f
--- /dev/null
+++ b/buildtools/compare_config_h4.sh
@@ -0,0 +1,12 @@
+#!/bin/sh
+
+# compare the generated config.h from a waf build with an existing
+# samba build
+
+grep "^.define" bin/default/source4/include/config.h | sort > waf-config.h
+grep "^.define" $HOME/samba_old/source4/include/config.h | sort > old-config.h
+
+comm -23 old-config.h waf-config.h
+
+#echo
+#diff -u old-config.h waf-config.h
diff --git a/buildtools/compare_generated.sh b/buildtools/compare_generated.sh
new file mode 100755
index 0000000..ebef8a9
--- /dev/null
+++ b/buildtools/compare_generated.sh
@@ -0,0 +1,50 @@
+#!/bin/sh
+
+# compare the generated files from a waf build with an existing samba build
+
+old_build=$HOME/samba_old
+
+gen_files=$(cd bin/default && find . -type f -name '*.[ch]')
+
+
+strip_file()
+{
+ in_file=$1
+ out_file=$2
+ cat $in_file |
+ grep -v 'The following definitions come from' |
+ grep -v 'Automatically generated at' |
+ grep -v 'Generated from' |
+ sed 's|/home/tnagy/samba/source4||g' |
+ sed 's|/home/tnagy/samba/|../|g' |
+ sed 's|bin/default/source4/||g' |
+ sed 's|bin/default/|../|g' |
+ sed 's/define _____/define ___/g' |
+ sed 's/define __*/define _/g' |
+ sed 's/define _DEFAULT_/define _/g' |
+ sed 's/define _SOURCE4_/define ___/g' |
+ sed 's/define ___/define _/g' |
+ sed 's/ifndef ___/ifndef _/g' |
+ sed 's|endif /* ____|endif /* __|g' |
+ sed s/__DEFAULT_SOURCE4/__/ |
+ sed s/__DEFAULT_SOURCE4/__/ |
+ sed s/__DEFAULT/____/ > $out_file
+}
+
+compare_file()
+{
+	f=$1
+ bname=$(basename $f)
+ t1=/tmp/$bname.old.$$
+ t2=/tmp/$bname.new.$$
+ strip_file $old_build/$f $t1
+ strip_file bin/default/$f $t2
+ diff -u -b $t1 $t2 2>&1
+ rm -f $t1 $t2
+}
+
+for f in $gen_files; do
+ compare_file $f
+done
+
diff --git a/buildtools/compare_install.sh b/buildtools/compare_install.sh
new file mode 100755
index 0000000..b964117
--- /dev/null
+++ b/buildtools/compare_install.sh
@@ -0,0 +1,8 @@
+#!/bin/sh
+
+prefix1="$1"
+prefix2="$2"
+
+(cd $prefix1 && find . ) | sort > p1.txt
+(cd $prefix2 && find . ) | sort > p2.txt
+diff -u p[12].txt
diff --git a/buildtools/scripts/Makefile.waf b/buildtools/scripts/Makefile.waf
new file mode 100644
index 0000000..5fc939c
--- /dev/null
+++ b/buildtools/scripts/Makefile.waf
@@ -0,0 +1,72 @@
+# simple makefile wrapper to run waf
+
+WAF_BINARY=BUILDTOOLS/bin/waf
+WAF=WAF_MAKE=1 $(WAF_BINARY)
+
+all:
+ $(WAF) build
+
+install:
+ $(WAF) install
+
+uninstall:
+ $(WAF) uninstall
+
+test:
+ $(WAF) test $(TEST_OPTIONS)
+
+help:
+ @echo NOTE: to run extended waf options use $(WAF_BINARY) or modify your PATH
+ $(WAF) --help
+
+testenv:
+ $(WAF) test --testenv $(TEST_OPTIONS)
+
+quicktest:
+ $(WAF) test --quick $(TEST_OPTIONS)
+
+dist:
+ $(WAF) dist
+
+distcheck:
+ $(WAF) distcheck
+
+clean:
+ $(WAF) clean
+
+distclean:
+ $(WAF) distclean
+
+reconfigure: configure
+ $(WAF) reconfigure
+
+show_waf_options:
+ $(WAF) --help
+
+# some compatibility make targets
+everything: all
+
+testsuite: all
+
+check: test
+
+torture: all
+
+# this should do an install as well, once install is finished
+installcheck: test
+
+etags:
+ $(WAF) etags
+
+ctags:
+ $(WAF) ctags
+
+bin/%:: FORCE
+ $(WAF) --targets=$@
+FORCE:
+
+configure: autogen-waf.sh BUILDTOOLS/scripts/configure.waf
+ ./autogen-waf.sh
+
+Makefile: autogen-waf.sh configure BUILDTOOLS/scripts/Makefile.waf
+ ./autogen-waf.sh
diff --git a/buildtools/scripts/abi_gen.sh b/buildtools/scripts/abi_gen.sh
new file mode 100755
index 0000000..787718c
--- /dev/null
+++ b/buildtools/scripts/abi_gen.sh
@@ -0,0 +1,21 @@
+#!/bin/sh
+# generate a set of ABI signatures from a shared library
+
+SHAREDLIB="$1"
+
+GDBSCRIPT="gdb_syms.$$"
+
+(
+cat <<EOF
+set height 0
+set width 0
+EOF
+nm "$SHAREDLIB" | cut -d' ' -f2- | egrep '^[BDGTRVWS]' | grep -v @ | egrep -v ' (__bss_start|_edata|_init|_fini|_end)' | cut -c3- | sort | while read s; do
+ echo "echo $s: "
+ echo p $s
+done
+) > $GDBSCRIPT
+
+# forcing the terminal avoids a problem on Fedora 12
+TERM=none gdb -batch -x $GDBSCRIPT "$SHAREDLIB" < /dev/null
+rm -f $GDBSCRIPT
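A typical invocation (the library path here is hypothetical and depends on the build layout) redirects the gdb output into a signature file:

    sh buildtools/scripts/abi_gen.sh bin/shared/libtdb.so > ABI/tdb-1.3.5.sigs

gdb evaluates each symbol listed by nm; the raw output is then normalised by the ABI-checking tool into the "name: type" lines seen in ABI/*.sigs above.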
diff --git a/buildtools/scripts/autogen-waf.sh b/buildtools/scripts/autogen-waf.sh
new file mode 100755
index 0000000..7a6e94c
--- /dev/null
+++ b/buildtools/scripts/autogen-waf.sh
@@ -0,0 +1,27 @@
+#!/bin/sh
+
+p=`dirname $0`
+
+echo "Setting up for waf build"
+
+echo "Looking for the buildtools directory"
+
+d="buildtools"
+while test \! -d "$p/$d"; do d="../$d"; done
+
+echo "Found buildtools in $p/$d"
+
+echo "Setting up configure"
+rm -f $p/configure $p/include/config*.h*
+sed "s|BUILDTOOLS|$d|g;s|BUILDPATH|$p|g" < "$p/$d/scripts/configure.waf" > $p/configure
+chmod +x $p/configure
+
+echo "Setting up Makefile"
+rm -f $p/makefile $p/Makefile
+sed "s|BUILDTOOLS|$d|g" < "$p/$d/scripts/Makefile.waf" > $p/Makefile
+
+echo "done. Now run $p/configure or $p/configure.developer then make."
+if [ $p != "." ]; then
+ echo "Notice: The build invoke path is not 'source4'! Use make with the parameter"
+ echo "-C <'source4' path>. Example: make -C source4 all"
+fi
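To make the substitution concrete: if the buildtools directory is found at "buildtools", the generated Makefile gets WAF_BINARY=buildtools/bin/waf in place of the BUILDTOOLS placeholder, and the generated configure script has BUILDPATH replaced by the package's own path, so both work from any of the library subdirectories.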
diff --git a/buildtools/scripts/configure.waf b/buildtools/scripts/configure.waf
new file mode 100755
index 0000000..a7d8d1d
--- /dev/null
+++ b/buildtools/scripts/configure.waf
@@ -0,0 +1,14 @@
+#!/bin/sh
+
+PREVPATH=`dirname $0`
+
+WAF=BUILDTOOLS/bin/waf
+
+# using JOBS=1 gives maximum compatibility with
+# systems like AIX which have broken threading in python
+JOBS=1
+export JOBS
+
+cd BUILDPATH || exit 1
+$WAF configure "$@" || exit 1
+cd $PREVPATH
diff --git a/buildtools/testwaf.sh b/buildtools/testwaf.sh
new file mode 100755
index 0000000..127e525
--- /dev/null
+++ b/buildtools/testwaf.sh
@@ -0,0 +1,70 @@
+#!/bin/bash
+
+set -e
+set -x
+
+d=$(dirname $0)
+
+cd $d/..
+PREFIX=$HOME/testprefix
+
+if [ $# -gt 0 ]; then
+ tests="$*"
+else
+ tests="lib/replace lib/talloc lib/tevent lib/tdb lib/ldb"
+fi
+
+echo "testing in dirs $tests"
+
+for d in $tests; do
+ echo "`date`: testing $d"
+ pushd $d
+ rm -rf bin
+ type waf
+ waf dist
+ ./configure -C --enable-developer --prefix=$PREFIX
+ time make
+ make install
+ make distcheck
+ case $d in
+ "lib/ldb")
+ ldd bin/ldbadd
+ ;;
+ "lib/replace")
+ ldd bin/replace_testsuite
+ ;;
+ "lib/talloc")
+ ldd bin/talloc_testsuite
+ ;;
+ "lib/tdb")
+ ldd bin/tdbtool
+ ;;
+ esac
+ popd
+done
+
+echo "testing python portability"
+pushd lib/talloc
+versions="python2.4 python2.5 python2.6 python3.0 python3.1"
+for p in $versions; do
+ ret=$(which $p || echo "failed")
+ if [ $ret = "failed" ]; then
+ echo "$p not found, skipping"
+ continue
+ fi
+ echo "Testing $p"
+ $p ../../buildtools/bin/waf configure -C --enable-developer --prefix=$PREFIX
+ $p ../../buildtools/bin/waf build install
+done
+popd
+
+echo "testing cross compiling"
+pushd lib/talloc
+ret=$(which arm-linux-gnueabi-gcc || echo "failed")
+if [ $ret != "failed" ]; then
+ CC=arm-linux-gnueabi-gcc ./configure -C --prefix=$PREFIX --cross-compile --cross-execute='runarm'
+ make && make install
+else
+ echo "Cross-compiler not installed, skipping test"
+fi
+popd
diff --git a/buildtools/wafsamba/README b/buildtools/wafsamba/README
new file mode 100644
index 0000000..1968b55
--- /dev/null
+++ b/buildtools/wafsamba/README
@@ -0,0 +1,8 @@
+This is a set of waf 'tools' that helps make building the Samba
+components easier by keeping common functions in one place. This gives
+us a more consistent build and ensures that our project rules are
+obeyed.
+
+
+TODO:
+ see http://wiki.samba.org/index.php/Waf
diff --git a/buildtools/wafsamba/__init__.py b/buildtools/wafsamba/__init__.py
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/buildtools/wafsamba/__init__.py
diff --git a/buildtools/wafsamba/configure_file.py b/buildtools/wafsamba/configure_file.py
new file mode 100644
index 0000000..8e2ba3b
--- /dev/null
+++ b/buildtools/wafsamba/configure_file.py
@@ -0,0 +1,44 @@
+# handle substitution of variables in .in files
+
+import Build, sys, Logs
+from samba_utils import *
+
+def subst_at_vars(task):
+    '''substitute @VAR@ style variables in a file'''
+
+ env = task.env
+ src = task.inputs[0].srcpath(env)
+ tgt = task.outputs[0].bldpath(env)
+
+ f = open(src, 'r')
+ s = f.read()
+ f.close()
+ # split on the vars
+ a = re.split('(@\w+@)', s)
+ out = []
+ for v in a:
+ if re.match('@\w+@', v):
+ vname = v[1:-1]
+ if not vname in task.env and vname.upper() in task.env:
+ vname = vname.upper()
+ if not vname in task.env:
+ Logs.error("Unknown substitution %s in %s" % (v, task.name))
+ sys.exit(1)
+ v = SUBST_VARS_RECURSIVE(task.env[vname], task.env)
+ out.append(v)
+ contents = ''.join(out)
+ f = open(tgt, 'w')
+    f.write(contents)
+ f.close()
+ return 0
+
+def CONFIGURE_FILE(bld, in_file, **kwargs):
+ '''configure file'''
+
+ base=os.path.basename(in_file)
+ t = bld.SAMBA_GENERATOR('INFILE_%s' % base,
+ rule = subst_at_vars,
+ source = in_file + '.in',
+ target = in_file,
+ vars = kwargs)
+Build.BuildContext.CONFIGURE_FILE = CONFIGURE_FILE
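A minimal sketch of how this hook is used from a wscript, assuming a hypothetical template foo.conf.in containing a line such as "prefix = @PREFIX@" and a PREFIX value in the configure environment:

    # hypothetical wscript fragment: reads foo.conf.in, writes foo.conf;
    # substitution values come from the task environment, not the kwargs
    bld.CONFIGURE_FILE('foo.conf')

The kwargs are only recorded as dependency vars on the generator; subst_at_vars itself looks each @VAR@ up in task.env (falling back to the upper-case name) and fails the build on unknown variables.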
diff --git a/buildtools/wafsamba/gccdeps.py b/buildtools/wafsamba/gccdeps.py
new file mode 100644
index 0000000..2da42e6
--- /dev/null
+++ b/buildtools/wafsamba/gccdeps.py
@@ -0,0 +1,127 @@
+# encoding: utf-8
+# Thomas Nagy, 2008-2010 (ita)
+
+"""
+Execute the tasks with gcc -MD, read the dependencies from the .d file
+and prepare the dependency calculation for the next run
+"""
+
+import os, re, threading
+import Task, Logs, Utils, preproc
+from TaskGen import before, after, feature
+
+lock = threading.Lock()
+
+preprocessor_flag = '-MD'
+
+@feature('cc')
+@before('apply_core')
+def add_mmd_cc(self):
+ if self.env.get_flat('CCFLAGS').find(preprocessor_flag) < 0:
+ self.env.append_value('CCFLAGS', preprocessor_flag)
+
+@feature('cxx')
+@before('apply_core')
+def add_mmd_cxx(self):
+ if self.env.get_flat('CXXFLAGS').find(preprocessor_flag) < 0:
+ self.env.append_value('CXXFLAGS', preprocessor_flag)
+
+def scan(self):
+ "the scanner does not do anything initially"
+ nodes = self.generator.bld.node_deps.get(self.unique_id(), [])
+ names = []
+ return (nodes, names)
+
+re_o = re.compile("\.o$")
+re_src = re.compile("^(\.\.)[\\/](.*)$")
+
+def post_run(self):
+    # The following code is executed by threads; it is not thread-safe, so a lock is needed...
+
+ if getattr(self, 'cached', None):
+ return Task.Task.post_run(self)
+
+ name = self.outputs[0].abspath(self.env)
+ name = re_o.sub('.d', name)
+ txt = Utils.readf(name)
+ #os.unlink(name)
+
+ txt = txt.replace('\\\n', '')
+
+ lst = txt.strip().split(':')
+ val = ":".join(lst[1:])
+ val = val.split()
+
+ nodes = []
+ bld = self.generator.bld
+
+ f = re.compile("^("+self.env.variant()+"|\.\.)[\\/](.*)$")
+ for x in val:
+ if os.path.isabs(x):
+
+ if not preproc.go_absolute:
+ continue
+
+ lock.acquire()
+ try:
+ node = bld.root.find_resource(x)
+ finally:
+ lock.release()
+ else:
+ g = re.search(re_src, x)
+ if g:
+ x = g.group(2)
+ lock.acquire()
+ try:
+ node = bld.bldnode.parent.find_resource(x)
+ finally:
+ lock.release()
+ else:
+ g = re.search(f, x)
+ if g:
+ x = g.group(2)
+ lock.acquire()
+ try:
+ node = bld.srcnode.find_resource(x)
+ finally:
+ lock.release()
+
+ if id(node) == id(self.inputs[0]):
+ # ignore the source file, it is already in the dependencies
+ # this way, successful config tests may be retrieved from the cache
+ continue
+
+ if not node:
+ raise ValueError('could not find %r for %r' % (x, self))
+ else:
+ nodes.append(node)
+
+ Logs.debug('deps: real scanner for %s returned %s' % (str(self), str(nodes)))
+
+ bld.node_deps[self.unique_id()] = nodes
+ bld.raw_deps[self.unique_id()] = []
+
+ try:
+ del self.cache_sig
+ except:
+ pass
+
+ Task.Task.post_run(self)
+
+import Constants, Utils
+def sig_implicit_deps(self):
+ try:
+ return Task.Task.sig_implicit_deps(self)
+ except Utils.WafError:
+ return Constants.SIG_NIL
+
+for name in 'cc cxx'.split():
+ try:
+ cls = Task.TaskBase.classes[name]
+ except KeyError:
+ pass
+ else:
+ cls.post_run = post_run
+ cls.scan = scan
+ cls.sig_implicit_deps = sig_implicit_deps
+
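For reference, the .d files consumed by post_run look roughly like this (paths hypothetical):

    default/lib/tdb/common/tdb_1.o: ../lib/tdb/common/tdb.c \
     ../lib/tdb/include/tdb.h

post_run joins the backslash-continued lines, discards everything up to the first ':', and resolves each remaining path to a waf node, skipping the input source file itself so that successful configuration tests can still be retrieved from the cache.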
diff --git a/buildtools/wafsamba/generic_cc.py b/buildtools/wafsamba/generic_cc.py
new file mode 100644
index 0000000..504e902
--- /dev/null
+++ b/buildtools/wafsamba/generic_cc.py
@@ -0,0 +1,71 @@
+
+# compiler definition for a generic C compiler
+# based on suncc.py from waf
+
+import os, optparse
+import Utils, Options, Configure
+import ccroot, ar
+from Configure import conftest
+
+from compiler_cc import c_compiler
+
+c_compiler['default'] = ['gcc', 'generic_cc']
+c_compiler['hpux'] = ['gcc', 'generic_cc']
+
+@conftest
+def find_generic_cc(conf):
+ v = conf.env
+ cc = None
+ if v['CC']: cc = v['CC']
+ elif 'CC' in conf.environ: cc = conf.environ['CC']
+ if not cc: cc = conf.find_program('cc', var='CC')
+ if not cc: conf.fatal('generic_cc was not found')
+ cc = conf.cmd_to_list(cc)
+ v['CC'] = cc
+ v['CC_NAME'] = 'generic'
+
+@conftest
+def generic_cc_common_flags(conf):
+ v = conf.env
+
+ v['CC_SRC_F'] = ''
+ v['CC_TGT_F'] = ['-c', '-o', '']
+ v['CPPPATH_ST'] = '-I%s' # template for adding include paths
+
+ # linker
+ if not v['LINK_CC']: v['LINK_CC'] = v['CC']
+ v['CCLNK_SRC_F'] = ''
+ v['CCLNK_TGT_F'] = ['-o', '']
+
+ v['LIB_ST'] = '-l%s' # template for adding libs
+ v['LIBPATH_ST'] = '-L%s' # template for adding libpaths
+ v['STATICLIB_ST'] = '-l%s'
+ v['STATICLIBPATH_ST'] = '-L%s'
+ v['CCDEFINES_ST'] = '-D%s'
+
+# v['SONAME_ST'] = '-Wl,-h -Wl,%s'
+# v['SHLIB_MARKER'] = '-Bdynamic'
+# v['STATICLIB_MARKER'] = '-Bstatic'
+
+ # program
+ v['program_PATTERN'] = '%s'
+
+ # shared library
+# v['shlib_CCFLAGS'] = ['-Kpic', '-DPIC']
+# v['shlib_LINKFLAGS'] = ['-G']
+ v['shlib_PATTERN'] = 'lib%s.so'
+
+ # static lib
+# v['staticlib_LINKFLAGS'] = ['-Bstatic']
+# v['staticlib_PATTERN'] = 'lib%s.a'
+
+detect = '''
+find_generic_cc
+find_cpp
+find_ar
+generic_cc_common_flags
+cc_load_tools
+cc_add_flags
+link_add_flags
+'''
+
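A hypothetical configure fragment that would activate this tool; in waf 1.5, check_tool() loads the module and runs the conftest functions named in the detect string above:

    conf.check_tool('generic_cc')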
diff --git a/buildtools/wafsamba/hpuxcc.py b/buildtools/wafsamba/hpuxcc.py
new file mode 100644
index 0000000..c263556
--- /dev/null
+++ b/buildtools/wafsamba/hpuxcc.py
@@ -0,0 +1,56 @@
+# compiler definition for HPUX
+# based on suncc.py from waf
+
+import os, optparse, sys
+import Utils, Options, Configure
+import ccroot, ar
+from Configure import conftest
+import gcc
+
+
+@conftest
+def gcc_modifier_hpux(conf):
+ v=conf.env
+ v['CCFLAGS_DEBUG']=['-g']
+ v['CCFLAGS_RELEASE']=['-O2']
+ v['CC_SRC_F']=''
+ v['CC_TGT_F']=['-c','-o','']
+ v['CPPPATH_ST']='-I%s'
+ if not v['LINK_CC']:v['LINK_CC']=v['CC']
+ v['CCLNK_SRC_F']=''
+ v['CCLNK_TGT_F']=['-o','']
+ v['LIB_ST']='-l%s'
+ v['LIBPATH_ST']='-L%s'
+ v['STATICLIB_ST']='-l%s'
+ v['STATICLIBPATH_ST']='-L%s'
+ v['RPATH_ST']='-Wl,-rpath,%s'
+ v['CCDEFINES_ST']='-D%s'
+ v['SONAME_ST']='-Wl,-h,%s'
+ v['SHLIB_MARKER']=[]
+# v['STATICLIB_MARKER']='-Wl,-Bstatic'
+ v['FULLSTATIC_MARKER']='-static'
+ v['program_PATTERN']='%s'
+ v['shlib_CCFLAGS']=['-fPIC','-DPIC']
+ v['shlib_LINKFLAGS']=['-shared']
+ v['shlib_PATTERN']='lib%s.sl'
+# v['staticlib_LINKFLAGS']=['-Wl,-Bstatic']
+ v['staticlib_PATTERN']='lib%s.a'
+
+gcc.gcc_modifier_hpux = gcc_modifier_hpux
+
+from TaskGen import feature, after
+@feature('cprogram', 'cshlib')
+@after('apply_link', 'apply_lib_vars', 'apply_obj_vars')
+def hpux_addfullpath(self):
+ if sys.platform == 'hp-ux11':
+ link = getattr(self, 'link_task', None)
+ if link:
+ lst = link.env.LINKFLAGS
+ buf = []
+ for x in lst:
+ if x.startswith('-L'):
+ p2 = x[2:]
+ if not os.path.isabs(p2):
+ x = x[:2] + self.bld.srcnode.abspath(link.env) + "/../" + x[2:].lstrip('.')
+ buf.append(x)
+ link.env.LINKFLAGS = buf
diff --git a/buildtools/wafsamba/irixcc.py b/buildtools/wafsamba/irixcc.py
new file mode 100644
index 0000000..f3cb451
--- /dev/null
+++ b/buildtools/wafsamba/irixcc.py
@@ -0,0 +1,79 @@
+
+# compiler definition for irix/MIPSpro cc compiler
+# based on suncc.py from waf
+
+import os, optparse
+import Utils, Options, Configure
+import ccroot, ar
+from Configure import conftest
+
+from compiler_cc import c_compiler
+
+c_compiler['irix'] = ['gcc', 'irixcc']
+
+@conftest
+def find_irixcc(conf):
+ v = conf.env
+ cc = None
+ if v['CC']: cc = v['CC']
+ elif 'CC' in conf.environ: cc = conf.environ['CC']
+ if not cc: cc = conf.find_program('cc', var='CC')
+ if not cc: conf.fatal('irixcc was not found')
+ cc = conf.cmd_to_list(cc)
+
+ try:
+ if Utils.cmd_output(cc + ['-c99'] + ['-version']) != '':
+ conf.fatal('irixcc %r was not found' % cc)
+ except ValueError:
+        conf.fatal('irixcc -version could not be executed')
+
+ conf.env.append_unique('CCFLAGS', '-c99')
+
+ v['CC'] = cc
+ v['CC_NAME'] = 'irix'
+
+@conftest
+def irixcc_common_flags(conf):
+ v = conf.env
+
+ v['CC_SRC_F'] = ''
+ v['CC_TGT_F'] = ['-c', '-o', '']
+ v['CPPPATH_ST'] = '-I%s' # template for adding include paths
+
+ # linker
+ if not v['LINK_CC']: v['LINK_CC'] = v['CC']
+ v['CCLNK_SRC_F'] = ''
+ v['CCLNK_TGT_F'] = ['-o', '']
+
+ v['LIB_ST'] = '-l%s' # template for adding libs
+ v['LIBPATH_ST'] = '-L%s' # template for adding libpaths
+ v['STATICLIB_ST'] = '-l%s'
+ v['STATICLIBPATH_ST'] = '-L%s'
+ v['CCDEFINES_ST'] = '-D%s'
+
+# v['SONAME_ST'] = '-Wl,-h -Wl,%s'
+# v['SHLIB_MARKER'] = '-Bdynamic'
+# v['STATICLIB_MARKER'] = '-Bstatic'
+
+ # program
+ v['program_PATTERN'] = '%s'
+
+ # shared library
+# v['shlib_CCFLAGS'] = ['-Kpic', '-DPIC']
+# v['shlib_LINKFLAGS'] = ['-G']
+ v['shlib_PATTERN'] = 'lib%s.so'
+
+ # static lib
+# v['staticlib_LINKFLAGS'] = ['-Bstatic']
+# v['staticlib_PATTERN'] = 'lib%s.a'
+
+detect = '''
+find_irixcc
+find_cpp
+find_ar
+irixcc_common_flags
+cc_load_tools
+cc_add_flags
+link_add_flags
+'''
+
diff --git a/buildtools/wafsamba/nothreads.py b/buildtools/wafsamba/nothreads.py
new file mode 100644
index 0000000..075dcd3
--- /dev/null
+++ b/buildtools/wafsamba/nothreads.py
@@ -0,0 +1,220 @@
+# encoding: utf-8
+# Thomas Nagy, 2005-2008 (ita)
+
+# this replaces the core of Runner.py in waf with a variant that works
+# on systems with completely broken threading (such as Python 2.5.x on
+# AIX). For simplicity we enable this when JOBS=1, which is triggered
+# by the compatibility makefile used for the waf build. That also ensures
+# this code is tested, as it means it is used in the build farm, and by
+# anyone using 'make' to build Samba with waf
+
+"Execute the tasks"
+
+import sys, random, time, threading, traceback, os
+try: from Queue import Queue
+except ImportError: from queue import Queue
+import Build, Utils, Logs, Options
+from Logs import debug, error
+from Constants import *
+
+GAP = 15
+
+run_old = threading.Thread.run
+def run(*args, **kwargs):
+ try:
+ run_old(*args, **kwargs)
+ except (KeyboardInterrupt, SystemExit):
+ raise
+ except:
+ sys.excepthook(*sys.exc_info())
+threading.Thread.run = run
+
+
+class TaskConsumer(object):
+ consumers = 1
+
+def process(tsk):
+ m = tsk.master
+ if m.stop:
+ m.out.put(tsk)
+ return
+
+ try:
+ tsk.generator.bld.printout(tsk.display())
+ if tsk.__class__.stat: ret = tsk.__class__.stat(tsk)
+ # actual call to task's run() function
+ else: ret = tsk.call_run()
+ except Exception, e:
+ tsk.err_msg = Utils.ex_stack()
+ tsk.hasrun = EXCEPTION
+
+ # TODO cleanup
+ m.error_handler(tsk)
+ m.out.put(tsk)
+ return
+
+ if ret:
+ tsk.err_code = ret
+ tsk.hasrun = CRASHED
+ else:
+ try:
+ tsk.post_run()
+ except Utils.WafError:
+ pass
+ except Exception:
+ tsk.err_msg = Utils.ex_stack()
+ tsk.hasrun = EXCEPTION
+ else:
+ tsk.hasrun = SUCCESS
+ if tsk.hasrun != SUCCESS:
+ m.error_handler(tsk)
+
+ m.out.put(tsk)
+
+class Parallel(object):
+ """
+ keep the consumer threads busy, and avoid consuming cpu cycles
+ when no more tasks can be added (end of the build, etc)
+ """
+ def __init__(self, bld, j=2):
+
+ # number of consumers
+ self.numjobs = j
+
+ self.manager = bld.task_manager
+ self.manager.current_group = 0
+
+ self.total = self.manager.total()
+
+ # tasks waiting to be processed - IMPORTANT
+ self.outstanding = []
+ self.maxjobs = MAXJOBS
+
+        # tasks that are waiting for another task to complete
+ self.frozen = []
+
+ # tasks returned by the consumers
+ self.out = Queue(0)
+
+ self.count = 0 # tasks not in the producer area
+
+ self.processed = 1 # progress indicator
+
+ self.stop = False # error condition to stop the build
+ self.error = False # error flag
+
+ def get_next(self):
+ "override this method to schedule the tasks in a particular order"
+ if not self.outstanding:
+ return None
+ return self.outstanding.pop(0)
+
+ def postpone(self, tsk):
+ "override this method to schedule the tasks in a particular order"
+ # TODO consider using a deque instead
+ if random.randint(0, 1):
+ self.frozen.insert(0, tsk)
+ else:
+ self.frozen.append(tsk)
+
+ def refill_task_list(self):
+ "called to set the next group of tasks"
+
+ while self.count > self.numjobs + GAP or self.count >= self.maxjobs:
+ self.get_out()
+
+ while not self.outstanding:
+ if self.count:
+ self.get_out()
+
+ if self.frozen:
+ self.outstanding += self.frozen
+ self.frozen = []
+ elif not self.count:
+ (jobs, tmp) = self.manager.get_next_set()
+ if jobs is not None:
+ self.maxjobs = jobs
+ if tmp:
+ self.outstanding += tmp
+ break
+
+ def get_out(self):
+ "the tasks that are put to execute are all collected using get_out"
+ ret = self.out.get()
+ self.manager.add_finished(ret)
+ if not self.stop and getattr(ret, 'more_tasks', None):
+ self.outstanding += ret.more_tasks
+ self.total += len(ret.more_tasks)
+ self.count -= 1
+
+ def error_handler(self, tsk):
+ "by default, errors make the build stop (not thread safe so be careful)"
+ if not Options.options.keep:
+ self.stop = True
+ self.error = True
+
+ def start(self):
+ "execute the tasks"
+
+ while not self.stop:
+
+ self.refill_task_list()
+
+ # consider the next task
+ tsk = self.get_next()
+ if not tsk:
+ if self.count:
+ # tasks may add new ones after they are run
+ continue
+ else:
+ # no tasks to run, no tasks running, time to exit
+ break
+
+ if tsk.hasrun:
+ # if the task is marked as "run", just skip it
+ self.processed += 1
+ self.manager.add_finished(tsk)
+ continue
+
+ try:
+ st = tsk.runnable_status()
+ except Exception, e:
+ self.processed += 1
+ if self.stop and not Options.options.keep:
+ tsk.hasrun = SKIPPED
+ self.manager.add_finished(tsk)
+ continue
+ self.error_handler(tsk)
+ self.manager.add_finished(tsk)
+ tsk.hasrun = EXCEPTION
+ tsk.err_msg = Utils.ex_stack()
+ continue
+
+ if st == ASK_LATER:
+ self.postpone(tsk)
+ elif st == SKIP_ME:
+ self.processed += 1
+ tsk.hasrun = SKIPPED
+ self.manager.add_finished(tsk)
+ else:
+ # run me: put the task in ready queue
+ tsk.position = (self.processed, self.total)
+ self.count += 1
+ self.processed += 1
+ tsk.master = self
+
+ process(tsk)
+
+ # self.count represents the tasks that have been made available to the consumer threads
+ # collect all the tasks after an error else the message may be incomplete
+ while self.error and self.count:
+ self.get_out()
+
+ #print loop
+ assert (self.count == 0 or self.stop)
+
+
+# enable nothreads
+import Runner
+Runner.process = process
+Runner.Parallel = Parallel
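Since the final two assignments monkey-patch Runner at import time, merely loading this module switches waf to the serial scheduler. In this tree that is keyed off JOBS=1, which the generated configure script exports (see configure.waf above), so a plain

    JOBS=1 make

build exercises this code path.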
diff --git a/buildtools/wafsamba/pkgconfig.py b/buildtools/wafsamba/pkgconfig.py
new file mode 100644
index 0000000..8a3f807
--- /dev/null
+++ b/buildtools/wafsamba/pkgconfig.py
@@ -0,0 +1,69 @@
+# handle substitution of variables in pc files
+
+import Build, sys, Logs
+from samba_utils import *
+
+def subst_at_vars(task):
+    '''substitute @VAR@ style variables in a file'''
+ src = task.inputs[0].srcpath(task.env)
+ tgt = task.outputs[0].bldpath(task.env)
+
+ f = open(src, 'r')
+ s = f.read()
+ f.close()
+ # split on the vars
+ a = re.split('(@\w+@)', s)
+ out = []
+ done_var = {}
+ back_sub = [ ('PREFIX', '${prefix}'), ('EXEC_PREFIX', '${exec_prefix}')]
+ for v in a:
+ if re.match('@\w+@', v):
+ vname = v[1:-1]
+ if not vname in task.env and vname.upper() in task.env:
+ vname = vname.upper()
+ if not vname in task.env:
+ Logs.error("Unknown substitution %s in %s" % (v, task.name))
+ sys.exit(1)
+ v = SUBST_VARS_RECURSIVE(task.env[vname], task.env)
+ # now we back substitute the allowed pc vars
+ for (b, m) in back_sub:
+ s = task.env[b]
+ if s == v[0:len(s)]:
+ if not b in done_var:
+ # we don't want to substitute the first usage
+ done_var[b] = True
+ else:
+ v = m + v[len(s):]
+ break
+ out.append(v)
+ contents = ''.join(out)
+ f = open(tgt, 'w')
+    f.write(contents)
+ f.close()
+ return 0
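+
+# Illustrative example of the rules above, assuming a task env with
+# PREFIX=/usr and LIBDIR=/usr/lib:
+#
+#   prefix=@PREFIX@     ->  prefix=/usr            (first use stays literal)
+#   libdir=@LIBDIR@     ->  libdir=${prefix}/lib   (later uses are back-substituted)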
+
+
+def PKG_CONFIG_FILES(bld, pc_files, vnum=None):
+ '''install some pkg_config pc files'''
+ dest = '${PKGCONFIGDIR}'
+ dest = bld.EXPAND_VARIABLES(dest)
+ for f in TO_LIST(pc_files):
+ base=os.path.basename(f)
+ t = bld.SAMBA_GENERATOR('PKGCONFIG_%s' % base,
+ rule=subst_at_vars,
+ source=f+'.in',
+ target=f)
+ bld.add_manual_dependency(bld.path.find_or_declare(f), bld.env['PREFIX'])
+ t.vars = []
+ if t.env.RPATH_ON_INSTALL:
+ t.env.LIB_RPATH = t.env.RPATH_ST % t.env.LIBDIR
+ else:
+ t.env.LIB_RPATH = ''
+ if vnum:
+ t.env.PACKAGE_VERSION = vnum
+ for v in [ 'PREFIX', 'EXEC_PREFIX', 'LIB_RPATH' ]:
+ t.vars.append(t.env[v])
+ bld.INSTALL_FILES(dest, f, flat=True, destname=base)
+Build.BuildContext.PKG_CONFIG_FILES = PKG_CONFIG_FILES
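+
+# Example (illustrative): a library wscript_build can install a pc file with
+#
+#   def build(bld):
+#       bld.PKG_CONFIG_FILES('tdb.pc', vnum='1.3.5')
+#
+# which generates tdb.pc from tdb.pc.in and installs it to ${PKGCONFIGDIR}.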
+
+
diff --git a/buildtools/wafsamba/samba3.py b/buildtools/wafsamba/samba3.py
new file mode 100644
index 0000000..6d06fd9
--- /dev/null
+++ b/buildtools/wafsamba/samba3.py
@@ -0,0 +1,133 @@
+# a waf tool to add autoconf-like macros to the configure section
+# and for SAMBA_ macros for building libraries, binaries etc
+
+import Options, Build, os
+from optparse import SUPPRESS_HELP
+from samba_utils import os_path_relpath, TO_LIST
+from samba_autoconf import library_flags
+
+
+def SAMBA3_ADD_OPTION(opt, option, help=(), dest=None, default=True,
+ with_name="with", without_name="without"):
+ if default is None:
+ default_str = "auto"
+ elif default is True:
+ default_str = "yes"
+ elif default is False:
+ default_str = "no"
+ else:
+ default_str = str(default)
+
+ if help == ():
+ help = ("Build with %s support (default=%s)" % (option, default_str))
+ if dest is None:
+ dest = "with_%s" % option.replace('-', '_')
+
+ with_val = "--%s-%s" % (with_name, option)
+ without_val = "--%s-%s" % (without_name, option)
+
+ #FIXME: This is broken and will always default to "default" no matter if
+ # --with or --without is chosen.
+ opt.add_option(with_val, help=help, action="store_true", dest=dest,
+ default=default)
+ opt.add_option(without_val, help=SUPPRESS_HELP, action="store_false",
+ dest=dest)
+Options.Handler.SAMBA3_ADD_OPTION = SAMBA3_ADD_OPTION
+
+def SAMBA3_IS_STATIC_MODULE(bld, module):
+ '''Check whether module is in static list'''
+ if module in bld.env['static_modules']:
+ return True
+ return False
+Build.BuildContext.SAMBA3_IS_STATIC_MODULE = SAMBA3_IS_STATIC_MODULE
+
+def SAMBA3_IS_SHARED_MODULE(bld, module):
+ '''Check whether module is in shared list'''
+ if module in bld.env['shared_modules']:
+ return True
+ return False
+Build.BuildContext.SAMBA3_IS_SHARED_MODULE = SAMBA3_IS_SHARED_MODULE
+
+def SAMBA3_IS_ENABLED_MODULE(bld, module):
+ '''Check whether module is in either shared or static list '''
+ return SAMBA3_IS_STATIC_MODULE(bld, module) or SAMBA3_IS_SHARED_MODULE(bld, module)
+Build.BuildContext.SAMBA3_IS_ENABLED_MODULE = SAMBA3_IS_ENABLED_MODULE
+
+
+
+def s3_fix_kwargs(bld, kwargs):
+ '''fix the build arguments for s3 build rules to include the
+ necessary includes, subdir and cflags options '''
+ s3dir = os.path.join(bld.env.srcdir, 'source3')
+ s3reldir = os_path_relpath(s3dir, bld.curdir)
+
+ # the extra_includes list is relative to the source3 directory
+ extra_includes = [ '.', 'include', 'lib' ]
+ # local heimdal paths only included when USING_SYSTEM_KRB5 is not set
+ if not bld.CONFIG_SET("USING_SYSTEM_KRB5"):
+ extra_includes += [ '../source4/heimdal/lib/com_err',
+ '../source4/heimdal/lib/krb5',
+ '../source4/heimdal/lib/gssapi',
+ '../source4/heimdal_build',
+ '../bin/default/source4/heimdal/lib/asn1' ]
+
+ if bld.CONFIG_SET('USING_SYSTEM_TDB'):
+ (tdb_includes, tdb_ldflags, tdb_cpppath) = library_flags(bld, 'tdb')
+ extra_includes += tdb_cpppath
+ else:
+ extra_includes += [ '../lib/tdb/include' ]
+
+ if bld.CONFIG_SET('USING_SYSTEM_TEVENT'):
+ (tevent_includes, tevent_ldflags, tevent_cpppath) = library_flags(bld, 'tevent')
+ extra_includes += tevent_cpppath
+ else:
+ extra_includes += [ '../lib/tevent' ]
+
+ if bld.CONFIG_SET('USING_SYSTEM_TALLOC'):
+ (talloc_includes, talloc_ldflags, talloc_cpppath) = library_flags(bld, 'talloc')
+ extra_includes += talloc_cpppath
+ else:
+ extra_includes += [ '../lib/talloc' ]
+
+ if bld.CONFIG_SET('USING_SYSTEM_POPT'):
+ (popt_includes, popt_ldflags, popt_cpppath) = library_flags(bld, 'popt')
+ extra_includes += popt_cpppath
+ else:
+ extra_includes += [ '../lib/popt' ]
+
+ # s3 builds assume that they will have a bunch of extra include paths
+ includes = []
+ for d in extra_includes:
+ includes += [ os.path.join(s3reldir, d) ]
+
+ # the rule may already have some includes listed
+ if 'includes' in kwargs:
+ includes += TO_LIST(kwargs['includes'])
+ kwargs['includes'] = includes
+
+# these wrappers allow for mixing of S3 and S4 build rules in the one build
+
+def SAMBA3_LIBRARY(bld, name, *args, **kwargs):
+ s3_fix_kwargs(bld, kwargs)
+ return bld.SAMBA_LIBRARY(name, *args, **kwargs)
+Build.BuildContext.SAMBA3_LIBRARY = SAMBA3_LIBRARY
+
+def SAMBA3_MODULE(bld, name, *args, **kwargs):
+ s3_fix_kwargs(bld, kwargs)
+ return bld.SAMBA_MODULE(name, *args, **kwargs)
+Build.BuildContext.SAMBA3_MODULE = SAMBA3_MODULE
+
+def SAMBA3_SUBSYSTEM(bld, name, *args, **kwargs):
+ s3_fix_kwargs(bld, kwargs)
+ return bld.SAMBA_SUBSYSTEM(name, *args, **kwargs)
+Build.BuildContext.SAMBA3_SUBSYSTEM = SAMBA3_SUBSYSTEM
+
+def SAMBA3_BINARY(bld, name, *args, **kwargs):
+ s3_fix_kwargs(bld, kwargs)
+ return bld.SAMBA_BINARY(name, *args, **kwargs)
+Build.BuildContext.SAMBA3_BINARY = SAMBA3_BINARY
+
+def SAMBA3_PYTHON(bld, name, *args, **kwargs):
+ s3_fix_kwargs(bld, kwargs)
+ return bld.SAMBA_PYTHON(name, *args, **kwargs)
+Build.BuildContext.SAMBA3_PYTHON = SAMBA3_PYTHON
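+
+# Example (illustrative, with a hypothetical target name): a source3
+# wscript_build uses these wrappers exactly like the plain SAMBA_ rules;
+# the s3 include paths are added automatically:
+#
+#   bld.SAMBA3_LIBRARY('example', source='lib/example.c', deps='talloc')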
diff --git a/buildtools/wafsamba/samba_abi.py b/buildtools/wafsamba/samba_abi.py
new file mode 100644
index 0000000..76acd00
--- /dev/null
+++ b/buildtools/wafsamba/samba_abi.py
@@ -0,0 +1,254 @@
+# functions for handling ABI checking of libraries
+
+import Options, Utils, os, Logs, samba_utils, sys, Task, fnmatch, re, Build
+from TaskGen import feature, before, after
+
+# these type maps cope with platform specific names for common types
+# please add new type mappings into the list below
+abi_type_maps = {
+ '_Bool' : 'bool',
+ 'struct __va_list_tag *' : 'va_list'
+ }
+
+version_key = lambda x: map(int, x.split("."))
+
+def normalise_signature(sig):
+ '''normalise a signature from gdb'''
+ sig = sig.strip()
+ sig = re.sub('^\$[0-9]+\s=\s\{(.+)\}$', r'\1', sig)
+ sig = re.sub('^\$[0-9]+\s=\s\{(.+)\}(\s0x[0-9a-f]+\s<\w+>)+$', r'\1', sig)
+ sig = re.sub('^\$[0-9]+\s=\s(0x[0-9a-f]+)\s?(<\w+>)?$', r'\1', sig)
+ sig = re.sub('0x[0-9a-f]+', '0xXXXX', sig)
+ sig = re.sub('", <incomplete sequence (\\\\[a-z0-9]+)>', r'\1"', sig)
+
+ for t in abi_type_maps:
+ # we need to cope with non-word characters in mapped types
+ m = t
+ m = m.replace('*', '\*')
+ if m[-1].isalnum() or m[-1] == '_':
+ m += '\\b'
+ if m[0].isalnum() or m[0] == '_':
+ m = '\\b' + m
+ sig = re.sub(m, abi_type_maps[t], sig)
+ return sig
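+
+# Illustrative examples of the normalisation (raw gdb output -> stored form):
+#
+#   '$1 = {int (struct tdb_context *, TDB_DATA)}' -> 'int (struct tdb_context *, TDB_DATA)'
+#   '$2 = 0x7f3a00001234 <tdb_null>'              -> '0xXXXX'
+#   '_Bool (struct __va_list_tag *)'              -> 'bool (va_list)'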
+
+
+def normalise_varargs(sig):
+ '''cope with older versions of gdb'''
+ sig = re.sub(',\s\.\.\.', '', sig)
+ return sig
+
+
+def parse_sigs(sigs, abi_match):
+ '''parse ABI signatures file'''
+ abi_match = samba_utils.TO_LIST(abi_match)
+ ret = {}
+ a = sigs.split('\n')
+ for s in a:
+ if s.find(':') == -1:
+ continue
+ sa = s.split(':')
+ if abi_match:
+ matched = False
+ negative = False
+ for p in abi_match:
+ if p[0] == '!' and fnmatch.fnmatch(sa[0], p[1:]):
+ negative = True
+ break
+ elif fnmatch.fnmatch(sa[0], p):
+ matched = True
+ break
+ if (not matched) and negative:
+ continue
+ Logs.debug("%s -> %s" % (sa[1], normalise_signature(sa[1])))
+ ret[sa[0]] = normalise_signature(sa[1])
+ return ret
+
+def save_sigs(sig_file, parsed_sigs):
+ '''save ABI signatures to a file'''
+ sigs = ''
+ for s in sorted(parsed_sigs.keys()):
+ sigs += '%s: %s\n' % (s, parsed_sigs[s])
+ return samba_utils.save_file(sig_file, sigs, create_dir=True)
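+
+# Illustrative .sigs file content, one 'symbol: signature' pair per line:
+#
+#   tdb_fetch: TDB_DATA (struct tdb_context *, TDB_DATA)
+#   tdb_open: struct tdb_context *(const char *, int, int, int, mode_t)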
+
+
+def abi_check_task(self):
+ '''check if the ABI has changed'''
+ abi_gen = self.ABI_GEN
+
+ libpath = self.inputs[0].abspath(self.env)
+ libname = os.path.basename(libpath)
+
+ sigs = Utils.cmd_output([abi_gen, libpath])
+ parsed_sigs = parse_sigs(sigs, self.ABI_MATCH)
+
+ sig_file = self.ABI_FILE
+
+ old_sigs = samba_utils.load_file(sig_file)
+ if old_sigs is None or Options.options.ABI_UPDATE:
+ if not save_sigs(sig_file, parsed_sigs):
+ raise Utils.WafError('Failed to save ABI file "%s"' % sig_file)
+ Logs.warn('Generated ABI signatures %s' % sig_file)
+ return
+
+ parsed_old_sigs = parse_sigs(old_sigs, self.ABI_MATCH)
+
+ # check all old sigs
+ got_error = False
+ for s in parsed_old_sigs:
+ if not s in parsed_sigs:
+ Logs.error('%s: symbol %s has been removed - please update major version\n\tsignature: %s' % (
+ libname, s, parsed_old_sigs[s]))
+ got_error = True
+ elif normalise_varargs(parsed_old_sigs[s]) != normalise_varargs(parsed_sigs[s]):
+ Logs.error('%s: symbol %s has changed - please update major version\n\told_signature: %s\n\tnew_signature: %s' % (
+ libname, s, parsed_old_sigs[s], parsed_sigs[s]))
+ got_error = True
+
+ for s in parsed_sigs:
+ if not s in parsed_old_sigs:
+ Logs.error('%s: symbol %s has been added - please mark it _PRIVATE_ or update minor version\n\tsignature: %s' % (
+ libname, s, parsed_sigs[s]))
+ got_error = True
+
+ if got_error:
+ raise Utils.WafError('ABI for %s has changed - please fix library version then build with --abi-update\nSee http://wiki.samba.org/index.php/Waf#ABI_Checking for more information\nIf you have not changed any ABI, and your platform always gives this error, please configure with --abi-check-disable to skip this check' % libname)
+
+
+t = Task.task_type_from_func('abi_check', abi_check_task, color='BLUE', ext_in='.bin')
+t.quiet = True
+# allow "waf --abi-check" to force re-checking the ABI
+if '--abi-check' in sys.argv:
+ Task.always_run(t)
+
+@after('apply_link')
+@feature('abi_check')
+def abi_check(self):
+ '''check that ABI matches saved signatures'''
+ env = self.bld.env
+ if not env.ABI_CHECK or self.abi_directory is None:
+ return
+
+ # if the platform doesn't support -fvisibility=hidden then the ABI
+ # checks become fairly meaningless
+ if not env.HAVE_VISIBILITY_ATTR:
+ return
+
+ topsrc = self.bld.srcnode.abspath()
+ abi_gen = os.path.join(topsrc, 'buildtools/scripts/abi_gen.sh')
+
+ abi_file = "%s/%s-%s.sigs" % (self.abi_directory, self.name, self.vnum)
+
+ tsk = self.create_task('abi_check', self.link_task.outputs[0])
+ tsk.ABI_FILE = abi_file
+ tsk.ABI_MATCH = self.abi_match
+ tsk.ABI_GEN = abi_gen
+
+
+def abi_process_file(fname, version, symmap):
+ '''process one ABI file, adding new symbols to the symmap'''
+ f = open(fname, mode='r')
+ for line in f:
+ symname = line.split(":")[0]
+ if not symname in symmap:
+ symmap[symname] = version
+ f.close()
+
+
+def abi_write_vscript(f, libname, current_version, versions, symmap, abi_match):
+ """Write a vscript file for a library in --version-script format.
+
+ :param f: File-like object to write to
+ :param libname: Name of the library, uppercased
+ :param current_version: Current version
+ :param versions: Versions to consider
+ :param symmap: Dictionary mapping symbols -> version
+ :param abi_match: List of symbols considered to be public in the current
+ version
+ """
+
+ invmap = {}
+ for s in symmap:
+ invmap.setdefault(symmap[s], []).append(s)
+
+ last_key = ""
+ versions = sorted(versions, key=version_key)
+ for k in versions:
+ symver = "%s_%s" % (libname, k)
+ if symver == current_version:
+ break
+ f.write("%s {\n" % symver)
+        if k in invmap:
+ f.write("\tglobal:\n")
+ for s in invmap.get(k, []):
+            f.write("\t\t%s;\n" % s)
+ f.write("}%s;\n\n" % last_key)
+ last_key = " %s" % symver
+ f.write("%s {\n" % current_version)
+ local_abi = filter(lambda x: x[0] == '!', abi_match)
+ global_abi = filter(lambda x: x[0] != '!', abi_match)
+ f.write("\tglobal:\n")
+ if len(global_abi) > 0:
+ for x in global_abi:
+ f.write("\t\t%s;\n" % x)
+ else:
+ f.write("\t\t*;\n")
+ if abi_match != ["*"]:
+ f.write("\tlocal:\n")
+ for x in local_abi:
+ f.write("\t\t%s;\n" % x[1:])
+ if len(global_abi) > 0:
+ f.write("\t\t*;\n")
+ f.write("};\n")
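+
+# Illustrative output, assuming libname 'TDB', versions ['1.2.0', '1.3.0'],
+# current_version 'TDB_1.3.5' and abi_match ['*'] (tabs shown as spaces):
+#
+#   TDB_1.2.0 {
+#       global:
+#           tdb_open;
+#   };
+#
+#   TDB_1.3.0 {
+#       global:
+#           tdb_fetch;
+#   } TDB_1.2.0;
+#
+#   TDB_1.3.5 {
+#       global:
+#           *;
+#   };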
+
+
+def abi_build_vscript(task):
+ '''generate a vscript file for our public libraries'''
+
+ tgt = task.outputs[0].bldpath(task.env)
+
+ symmap = {}
+ versions = []
+ for f in task.inputs:
+ fname = f.abspath(task.env)
+ basename = os.path.basename(fname)
+ version = basename[len(task.env.LIBNAME)+1:-len(".sigs")]
+ versions.append(version)
+ abi_process_file(fname, version, symmap)
+ f = open(tgt, mode='w')
+ try:
+ abi_write_vscript(f, task.env.LIBNAME, task.env.VERSION, versions,
+ symmap, task.env.ABI_MATCH)
+ finally:
+ f.close()
+
+
+def ABI_VSCRIPT(bld, libname, abi_directory, version, vscript, abi_match=None):
+ '''generate a vscript file for our public libraries'''
+ if abi_directory:
+ source = bld.path.ant_glob('%s/%s-[0-9]*.sigs' % (abi_directory, libname))
+ def abi_file_key(path):
+ return version_key(path[:-len(".sigs")].rsplit("-")[-1])
+ source = sorted(source.split(), key=abi_file_key)
+ else:
+ source = ''
+
+ libname = os.path.basename(libname)
+ version = os.path.basename(version)
+ libname = libname.replace("-", "_").replace("+","_").upper()
+ version = version.replace("-", "_").replace("+","_").upper()
+
+ t = bld.SAMBA_GENERATOR(vscript,
+ rule=abi_build_vscript,
+ source=source,
+ group='vscripts',
+ target=vscript)
+ if abi_match is None:
+ abi_match = ["*"]
+ else:
+ abi_match = samba_utils.TO_LIST(abi_match)
+ t.env.ABI_MATCH = abi_match
+ t.env.VERSION = version
+ t.env.LIBNAME = libname
+ t.vars = ['LIBNAME', 'VERSION', 'ABI_MATCH']
+Build.BuildContext.ABI_VSCRIPT = ABI_VSCRIPT
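+
+# Example (illustrative; argument values are assumptions): a library build
+# rule might generate its version script from the saved signature files with
+#
+#   bld.ABI_VSCRIPT('tdb', 'ABI', 'tdb-1.3.5', 'tdb.vscript', abi_match='tdb_*')
+#
+# This globs ABI/tdb-[0-9]*.sigs, sorts the files by version and writes
+# tdb.vscript with a TDB_1.3.5 block for the current version.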
diff --git a/buildtools/wafsamba/samba_autoconf.py b/buildtools/wafsamba/samba_autoconf.py
new file mode 100644
index 0000000..4f646fd
--- /dev/null
+++ b/buildtools/wafsamba/samba_autoconf.py
@@ -0,0 +1,852 @@
+# a waf tool to add autoconf-like macros to the configure section
+
+import Build, os, sys, Options, preproc, Logs
+import string
+from Configure import conf
+from samba_utils import *
+import samba_cross
+
+missing_headers = set()
+
+####################################################
+# some autoconf like helpers, to make the transition
+# to waf a bit easier for those used to autoconf
+# m4 files
+
+@runonce
+@conf
+def DEFINE(conf, d, v, add_to_cflags=False, quote=False):
+ '''define a config option'''
+ conf.define(d, v, quote=quote)
+ if add_to_cflags:
+ conf.env.append_value('CCDEFINES', d + '=' + str(v))
+
+def hlist_to_string(conf, headers=None):
+ '''convert a headers list to a set of #include lines'''
+ hdrs=''
+ hlist = conf.env.hlist
+ if headers:
+ hlist = hlist[:]
+ hlist.extend(TO_LIST(headers))
+ for h in hlist:
+ hdrs += '#include <%s>\n' % h
+ return hdrs
+
+
+@conf
+def COMPOUND_START(conf, msg):
+ '''start a compound test'''
+ def null_check_message_1(self,*k,**kw):
+ return
+ def null_check_message_2(self,*k,**kw):
+ return
+
+ v = getattr(conf.env, 'in_compound', [])
+ if v != [] and v != 0:
+ conf.env.in_compound = v + 1
+ return
+ conf.check_message_1(msg)
+ conf.saved_check_message_1 = conf.check_message_1
+ conf.check_message_1 = null_check_message_1
+ conf.saved_check_message_2 = conf.check_message_2
+ conf.check_message_2 = null_check_message_2
+ conf.env.in_compound = 1
+
+
+@conf
+def COMPOUND_END(conf, result):
+    '''end a compound test'''
+ conf.env.in_compound -= 1
+ if conf.env.in_compound != 0:
+ return
+ conf.check_message_1 = conf.saved_check_message_1
+ conf.check_message_2 = conf.saved_check_message_2
+ p = conf.check_message_2
+ if result is True:
+ p('ok')
+ elif not result:
+ p('not found', 'YELLOW')
+ else:
+ p(result)
+
+
+@feature('nolink')
+def nolink(self):
+    '''using the nolink type in conf.check() allows us to avoid
+    the link stage of a test, thus speeding up tests
+    where linking is not needed'''
+ pass
+
+
+def CHECK_HEADER(conf, h, add_headers=False, lib=None):
+ '''check for a header'''
+ if h in missing_headers and lib is None:
+ return False
+ d = h.upper().replace('/', '_')
+ d = d.replace('.', '_')
+ d = d.replace('-', '_')
+ d = 'HAVE_%s' % d
+ if CONFIG_SET(conf, d):
+ if add_headers:
+ if not h in conf.env.hlist:
+ conf.env.hlist.append(h)
+ return True
+
+ (ccflags, ldflags, cpppath) = library_flags(conf, lib)
+
+ hdrs = hlist_to_string(conf, headers=h)
+ if lib is None:
+ lib = ""
+ ret = conf.check(fragment='%s\nint main(void) { return 0; }' % hdrs,
+ type='nolink',
+ execute=0,
+ ccflags=ccflags,
+ includes=cpppath,
+ uselib=lib.upper(),
+ msg="Checking for header %s" % h)
+ if not ret:
+ missing_headers.add(h)
+ return False
+
+ conf.DEFINE(d, 1)
+ if add_headers and not h in conf.env.hlist:
+ conf.env.hlist.append(h)
+ return ret
+
+
+@conf
+def CHECK_HEADERS(conf, headers, add_headers=False, together=False, lib=None):
+ '''check for a list of headers
+
+ when together==True, then the headers accumulate within this test.
+ This is useful for interdependent headers
+ '''
+ ret = True
+ if not add_headers and together:
+ saved_hlist = conf.env.hlist[:]
+ set_add_headers = True
+ else:
+ set_add_headers = add_headers
+ for hdr in TO_LIST(headers):
+ if not CHECK_HEADER(conf, hdr, set_add_headers, lib=lib):
+ ret = False
+ if not add_headers and together:
+ conf.env.hlist = saved_hlist
+ return ret
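+
+# Example (illustrative): headers that only compile after their prerequisites
+# can be checked as an accumulating group:
+#
+#   conf.CHECK_HEADERS('sys/types.h sys/socket.h netinet/in.h', together=True)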
+
+
+def header_list(conf, headers=None, lib=None):
+ '''form a list of headers which exist, as a string'''
+ hlist=[]
+ if headers is not None:
+ for h in TO_LIST(headers):
+ if CHECK_HEADER(conf, h, add_headers=False, lib=lib):
+ hlist.append(h)
+ return hlist_to_string(conf, headers=hlist)
+
+
+@conf
+def CHECK_TYPE(conf, t, alternate=None, headers=None, define=None, lib=None, msg=None):
+ '''check for a single type'''
+ if define is None:
+ define = 'HAVE_' + t.upper().replace(' ', '_')
+ if msg is None:
+ msg='Checking for %s' % t
+ ret = CHECK_CODE(conf, '%s _x' % t,
+ define,
+ execute=False,
+ headers=headers,
+ local_include=False,
+ msg=msg,
+ lib=lib,
+ link=False)
+ if not ret and alternate:
+ conf.DEFINE(t, alternate)
+ return ret
+
+
+@conf
+def CHECK_TYPES(conf, list, headers=None, define=None, alternate=None, lib=None):
+ '''check for a list of types'''
+ ret = True
+ for t in TO_LIST(list):
+ if not CHECK_TYPE(conf, t, headers=headers,
+ define=define, alternate=alternate, lib=lib):
+ ret = False
+ return ret
+
+
+@conf
+def CHECK_TYPE_IN(conf, t, headers=None, alternate=None, define=None):
+ '''check for a single type with a header'''
+ return CHECK_TYPE(conf, t, headers=headers, alternate=alternate, define=define)
+
+
+@conf
+def CHECK_VARIABLE(conf, v, define=None, always=False,
+ headers=None, msg=None, lib=None):
+ '''check for a variable declaration (or define)'''
+ if define is None:
+ define = 'HAVE_%s' % v.upper()
+
+ if msg is None:
+ msg="Checking for variable %s" % v
+
+ return CHECK_CODE(conf,
+ # we need to make sure the compiler doesn't
+ # optimize it out...
+ '''
+ #ifndef %s
+ void *_x; _x=(void *)&%s; return (int)_x;
+ #endif
+ return 0
+ ''' % (v, v),
+ execute=False,
+ link=False,
+ msg=msg,
+ local_include=False,
+ lib=lib,
+ headers=headers,
+ define=define,
+ always=always)
+
+
+@conf
+def CHECK_DECLS(conf, vars, reverse=False, headers=None, always=False):
+ '''check a list of variable declarations, using the HAVE_DECL_xxx form
+ of define
+
+ When reverse==True then use HAVE_xxx_DECL instead of HAVE_DECL_xxx
+ '''
+ ret = True
+ for v in TO_LIST(vars):
+ if not reverse:
+ define='HAVE_DECL_%s' % v.upper()
+ else:
+ define='HAVE_%s_DECL' % v.upper()
+ if not CHECK_VARIABLE(conf, v,
+ define=define,
+ headers=headers,
+ msg='Checking for declaration of %s' % v,
+ always=always):
+ ret = False
+ return ret
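+
+# Example (illustrative):
+#
+#   conf.CHECK_DECLS('errno', headers='errno.h')   # defines HAVE_DECL_ERRNO
+#   conf.CHECK_DECLS('environ', reverse=True)      # defines HAVE_ENVIRON_DECL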
+
+
+def CHECK_FUNC(conf, f, link=True, lib=None, headers=None):
+ '''check for a function'''
+ define='HAVE_%s' % f.upper()
+
+ ret = False
+
+ conf.COMPOUND_START('Checking for %s' % f)
+
+ if link is None or link:
+ ret = CHECK_CODE(conf,
+ # this is based on the autoconf strategy
+ '''
+ #define %s __fake__%s
+ #ifdef HAVE_LIMITS_H
+ # include <limits.h>
+ #else
+ # include <assert.h>
+ #endif
+ #undef %s
+ #if defined __stub_%s || defined __stub___%s
+ #error "bad glibc stub"
+ #endif
+ extern char %s();
+ int main() { return %s(); }
+ ''' % (f, f, f, f, f, f, f),
+ execute=False,
+ link=True,
+ addmain=False,
+ add_headers=False,
+ define=define,
+ local_include=False,
+ lib=lib,
+ headers=headers,
+ msg='Checking for %s' % f)
+
+ if not ret:
+ ret = CHECK_CODE(conf,
+ # it might be a macro
+ # we need to make sure the compiler doesn't
+ # optimize it out...
+ 'void *__x = (void *)%s; return (int)__x' % f,
+ execute=False,
+ link=True,
+ addmain=True,
+ add_headers=True,
+ define=define,
+ local_include=False,
+ lib=lib,
+ headers=headers,
+ msg='Checking for macro %s' % f)
+
+ if not ret and (link is None or not link):
+ ret = CHECK_VARIABLE(conf, f,
+ define=define,
+ headers=headers,
+ msg='Checking for declaration of %s' % f)
+ conf.COMPOUND_END(ret)
+ return ret
+
+
+@conf
+def CHECK_FUNCS(conf, list, link=True, lib=None, headers=None):
+ '''check for a list of functions'''
+ ret = True
+ for f in TO_LIST(list):
+ if not CHECK_FUNC(conf, f, link=link, lib=lib, headers=headers):
+ ret = False
+ return ret
+
+
+@conf
+def CHECK_SIZEOF(conf, vars, headers=None, define=None, critical=True):
+ '''check the size of a type'''
+ for v in TO_LIST(vars):
+ v_define = define
+ ret = False
+ if v_define is None:
+ v_define = 'SIZEOF_%s' % v.upper().replace(' ', '_')
+        for size in (1, 2, 4, 8, 16, 32):
+ if CHECK_CODE(conf,
+ 'static int test_array[1 - 2 * !(((long int)(sizeof(%s))) <= %d)];' % (v, size),
+ define=v_define,
+ quote=False,
+ headers=headers,
+ local_include=False,
+ msg="Checking if size of %s == %d" % (v, size)):
+ conf.DEFINE(v_define, size)
+ ret = True
+ break
+ if not ret and critical:
+ Logs.error("Couldn't determine size of '%s'" % v)
+ sys.exit(1)
+ return ret
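+
+# The negative-array-size trick above only compiles when
+# sizeof(type) <= candidate, so the first candidate that compiles (tried in
+# ascending order) is reported as the size. Example (illustrative):
+#
+#   conf.CHECK_SIZEOF('time_t')    # defines e.g. SIZEOF_TIME_T=8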
+
+@conf
+def CHECK_VALUEOF(conf, v, headers=None, define=None):
+ '''check the value of a variable/define'''
+ ret = True
+ v_define = define
+ if v_define is None:
+ v_define = 'VALUEOF_%s' % v.upper().replace(' ', '_')
+ if CHECK_CODE(conf,
+ 'printf("%%u", (unsigned)(%s))' % v,
+ define=v_define,
+ execute=True,
+ define_ret=True,
+ quote=False,
+ headers=headers,
+ local_include=False,
+ msg="Checking value of %s" % v):
+ return int(conf.env[v_define])
+
+ return None
+
+@conf
+def CHECK_CODE(conf, code, define,
+ always=False, execute=False, addmain=True,
+ add_headers=True, mandatory=False,
+ headers=None, msg=None, cflags='', includes='# .',
+ local_include=True, lib=None, link=True,
+ define_ret=False, quote=False,
+ on_target=True):
+ '''check if some code compiles and/or runs'''
+
+ if CONFIG_SET(conf, define):
+ return True
+
+ if headers is not None:
+ CHECK_HEADERS(conf, headers=headers, lib=lib)
+
+ if add_headers:
+ hdrs = header_list(conf, headers=headers, lib=lib)
+ else:
+ hdrs = ''
+ if execute:
+ execute = 1
+ else:
+ execute = 0
+
+ defs = conf.get_config_header()
+
+ if addmain:
+ fragment='%s\n%s\n int main(void) { %s; return 0; }\n' % (defs, hdrs, code)
+ else:
+ fragment='%s\n%s\n%s\n' % (defs, hdrs, code)
+
+ if msg is None:
+ msg="Checking for %s" % define
+
+ cflags = TO_LIST(cflags)
+
+ if local_include:
+ cflags.append('-I%s' % conf.curdir)
+
+ if not link:
+ type='nolink'
+ else:
+ type='cprogram'
+
+ uselib = TO_LIST(lib)
+
+ (ccflags, ldflags, cpppath) = library_flags(conf, uselib)
+
+ includes = TO_LIST(includes)
+ includes.extend(cpppath)
+
+ uselib = [l.upper() for l in uselib]
+
+ cflags.extend(ccflags)
+
+ if on_target:
+ exec_args = conf.SAMBA_CROSS_ARGS(msg=msg)
+ else:
+ exec_args = []
+
+ conf.COMPOUND_START(msg)
+
+ ret = conf.check(fragment=fragment,
+ execute=execute,
+ define_name = define,
+ mandatory = mandatory,
+ ccflags=cflags,
+ ldflags=ldflags,
+ includes=includes,
+ uselib=uselib,
+ type=type,
+ msg=msg,
+ quote=quote,
+ exec_args=exec_args,
+ define_ret=define_ret)
+ if not ret and CONFIG_SET(conf, define):
+ # sometimes conf.check() returns false, but it
+ # sets the define. Maybe a waf bug?
+ ret = True
+ if ret:
+ if not define_ret:
+ conf.DEFINE(define, 1)
+ conf.COMPOUND_END(True)
+ else:
+ conf.COMPOUND_END(conf.env[define])
+ return True
+ if always:
+ conf.DEFINE(define, 0)
+ conf.COMPOUND_END(False)
+ return False
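+
+# Example (illustrative): by default the fragment is wrapped in main() and
+# only compiled and linked, not run:
+#
+#   conf.CHECK_CODE('struct stat st; return stat(".", &st);',
+#                   define='HAVE_STAT', headers='sys/stat.h',
+#                   msg='Checking for stat()')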
+
+
+
+@conf
+def CHECK_STRUCTURE_MEMBER(conf, structname, member,
+ always=False, define=None, headers=None):
+ '''check for a structure member'''
+ if define is None:
+ define = 'HAVE_%s' % member.upper()
+ return CHECK_CODE(conf,
+ '%s s; void *_x; _x=(void *)&s.%s' % (structname, member),
+ define,
+ execute=False,
+ link=False,
+ always=always,
+ headers=headers,
+ local_include=False,
+ msg="Checking for member %s in %s" % (member, structname))
+
+
+@conf
+def CHECK_CFLAGS(conf, cflags, fragment='int main(void) { return 0; }\n'):
+ '''check if the given cflags are accepted by the compiler
+ '''
+ return conf.check(fragment=fragment,
+ execute=0,
+ type='nolink',
+ ccflags=cflags,
+ msg="Checking compiler accepts %s" % cflags)
+
+@conf
+def CHECK_LDFLAGS(conf, ldflags):
+ '''check if the given ldflags are accepted by the linker
+ '''
+ return conf.check(fragment='int main(void) { return 0; }\n',
+ execute=0,
+ ldflags=ldflags,
+ msg="Checking linker accepts %s" % ldflags)
+
+
+@conf
+def CONFIG_GET(conf, option):
+    '''return the value of a configuration option, or None if it is not set'''
+ if (option in conf.env):
+ return conf.env[option]
+ else:
+ return None
+
+@conf
+def CONFIG_SET(conf, option):
+ '''return True if a configuration option was found'''
+ if option not in conf.env:
+ return False
+ v = conf.env[option]
+ if v is None:
+ return False
+ if v == []:
+ return False
+ if v == ():
+ return False
+ return True
+
+@conf
+def CONFIG_RESET(conf, option):
+ if option not in conf.env:
+ return
+ del conf.env[option]
+
+Build.BuildContext.CONFIG_RESET = CONFIG_RESET
+Build.BuildContext.CONFIG_SET = CONFIG_SET
+Build.BuildContext.CONFIG_GET = CONFIG_GET
+
+
+def library_flags(self, libs):
+ '''work out flags from pkg_config'''
+ ccflags = []
+ ldflags = []
+ cpppath = []
+ for lib in TO_LIST(libs):
+ # note that we do not add the -I and -L in here, as that is added by the waf
+ # core. Adding it here would just change the order that it is put on the link line
+ # which can cause system paths to be added before internal libraries
+ extra_ccflags = TO_LIST(getattr(self.env, 'CCFLAGS_%s' % lib.upper(), []))
+ extra_ldflags = TO_LIST(getattr(self.env, 'LDFLAGS_%s' % lib.upper(), []))
+ extra_cpppath = TO_LIST(getattr(self.env, 'CPPPATH_%s' % lib.upper(), []))
+ ccflags.extend(extra_ccflags)
+ ldflags.extend(extra_ldflags)
+ cpppath.extend(extra_cpppath)
+ if 'EXTRA_LDFLAGS' in self.env:
+ ldflags.extend(self.env['EXTRA_LDFLAGS'])
+
+ ccflags = unique_list(ccflags)
+ ldflags = unique_list(ldflags)
+ cpppath = unique_list(cpppath)
+ return (ccflags, ldflags, cpppath)
+
+
+@conf
+def CHECK_LIB(conf, libs, mandatory=False, empty_decl=True, set_target=True, shlib=False):
+ '''check if a set of libraries exist as system libraries
+
+ returns the sublist of libs that do exist as a syslib or []
+ '''
+
+ fragment= '''
+int foo()
+{
+ int v = 2;
+ return v*2;
+}
+'''
+ ret = []
+ liblist = TO_LIST(libs)
+ for lib in liblist[:]:
+ if GET_TARGET_TYPE(conf, lib) == 'SYSLIB':
+ ret.append(lib)
+ continue
+
+ (ccflags, ldflags, cpppath) = library_flags(conf, lib)
+ if shlib:
+ res = conf.check(features='cc cshlib', fragment=fragment, lib=lib, uselib_store=lib, ccflags=ccflags, ldflags=ldflags, uselib=lib.upper())
+ else:
+ res = conf.check(lib=lib, uselib_store=lib, ccflags=ccflags, ldflags=ldflags, uselib=lib.upper())
+
+ if not res:
+ if mandatory:
+                Logs.error("Mandatory library '%s' not found" % lib)
+ sys.exit(1)
+ if empty_decl:
+ # if it isn't a mandatory library, then remove it from dependency lists
+ if set_target:
+ SET_TARGET_TYPE(conf, lib, 'EMPTY')
+ else:
+ conf.define('HAVE_LIB%s' % lib.upper().replace('-','_').replace('.','_'), 1)
+ conf.env['LIB_' + lib.upper()] = lib
+ if set_target:
+ conf.SET_TARGET_TYPE(lib, 'SYSLIB')
+ ret.append(lib)
+
+ return ret
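+
+# Example (illustrative):
+#
+#   syslibs = conf.CHECK_LIB('z crypt', shlib=True)
+#
+# Each library found is registered as a SYSLIB target; with empty_decl the
+# missing ones are marked EMPTY so dependencies on them are silently dropped.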
+
+
+
+@conf
+def CHECK_FUNCS_IN(conf, list, library, mandatory=False, checklibc=False,
+ headers=None, link=True, empty_decl=True, set_target=True):
+ """
+    check that the functions in 'list' are available in 'library'.
+    If they are, then make that library available as a dependency.
+
+ if the library is not available and mandatory==True, then
+ raise an error.
+
+ If the library is not available and mandatory==False, then
+ add the library to the list of dependencies to remove from
+ build rules
+
+ optionally check for the functions first in libc
+ """
+ remaining = TO_LIST(list)
+ liblist = TO_LIST(library)
+
+ # check if some already found
+ for f in remaining[:]:
+ if CONFIG_SET(conf, 'HAVE_%s' % f.upper()):
+ remaining.remove(f)
+
+ # see if the functions are in libc
+ if checklibc:
+ for f in remaining[:]:
+ if CHECK_FUNC(conf, f, link=True, headers=headers):
+ remaining.remove(f)
+
+ if remaining == []:
+ for lib in liblist:
+ if GET_TARGET_TYPE(conf, lib) != 'SYSLIB' and empty_decl:
+ SET_TARGET_TYPE(conf, lib, 'EMPTY')
+ return True
+
+ checklist = conf.CHECK_LIB(liblist, empty_decl=empty_decl, set_target=set_target)
+ for lib in liblist[:]:
+ if not lib in checklist and mandatory:
+ Logs.error("Mandatory library '%s' not found for functions '%s'" % (lib, list))
+ sys.exit(1)
+
+ ret = True
+ for f in remaining:
+ if not CHECK_FUNC(conf, f, lib=' '.join(checklist), headers=headers, link=link):
+ ret = False
+
+ return ret
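+
+# Example (illustrative): the classic libdl check; if dlopen is already in
+# libc nothing extra is linked, otherwise libdl becomes a usable dependency:
+#
+#   conf.CHECK_FUNCS_IN('dlopen', 'dl', checklibc=True, headers='dlfcn.h')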
+
+
+@conf
+def IN_LAUNCH_DIR(conf):
+ '''return True if this rule is being run from the launch directory'''
+ return os.path.realpath(conf.curdir) == os.path.realpath(Options.launch_dir)
+Options.Handler.IN_LAUNCH_DIR = IN_LAUNCH_DIR
+
+
+@conf
+def SAMBA_CONFIG_H(conf, path=None):
+ '''write out config.h in the right directory'''
+ # we don't want to produce a config.h in places like lib/replace
+ # when we are building projects that depend on lib/replace
+ if not IN_LAUNCH_DIR(conf):
+ return
+
+ if conf.CHECK_CFLAGS(['-fstack-protector']) and conf.CHECK_LDFLAGS(['-fstack-protector']):
+ conf.ADD_CFLAGS('-fstack-protector')
+ conf.ADD_LDFLAGS('-fstack-protector')
+
+ if Options.options.debug:
+ conf.ADD_CFLAGS('-g', testflags=True)
+
+ if Options.options.developer:
+ conf.env.DEVELOPER_MODE = True
+
+ conf.ADD_CFLAGS('-g', testflags=True)
+ conf.ADD_CFLAGS('-Wall', testflags=True)
+ conf.ADD_CFLAGS('-Wshadow', testflags=True)
+ conf.ADD_CFLAGS('-Wmissing-prototypes', testflags=True)
+ conf.ADD_CFLAGS('-Wcast-align -Wcast-qual', testflags=True)
+ conf.ADD_CFLAGS('-fno-common', testflags=True)
+
+ conf.ADD_CFLAGS('-Werror=address', testflags=True)
+ # we add these here to ensure that -Wstrict-prototypes is not set during configure
+ conf.ADD_CFLAGS('-Werror=strict-prototypes -Wstrict-prototypes',
+ testflags=True)
+ conf.ADD_CFLAGS('-Werror=write-strings -Wwrite-strings',
+ testflags=True)
+ conf.ADD_CFLAGS('-Werror-implicit-function-declaration',
+ testflags=True)
+ conf.ADD_CFLAGS('-Werror=pointer-arith -Wpointer-arith',
+ testflags=True)
+ conf.ADD_CFLAGS('-Werror=declaration-after-statement -Wdeclaration-after-statement',
+ testflags=True)
+ conf.ADD_CFLAGS('-Werror=return-type -Wreturn-type',
+ testflags=True)
+ conf.ADD_CFLAGS('-Werror=uninitialized -Wuninitialized',
+ testflags=True)
+
+ conf.ADD_CFLAGS('-Wformat=2 -Wno-format-y2k', testflags=True)
+ # This check is because for ldb_search(), a NULL format string
+ # is not an error, but some compilers complain about that.
+ if CHECK_CFLAGS(conf, ["-Werror=format", "-Wformat=2"], '''
+int testformat(char *format, ...) __attribute__ ((format (__printf__, 1, 2)));
+
+int main(void) {
+ testformat(0);
+ return 0;
+}
+
+'''):
+ if not 'EXTRA_CFLAGS' in conf.env:
+ conf.env['EXTRA_CFLAGS'] = []
+ conf.env['EXTRA_CFLAGS'].extend(TO_LIST("-Werror=format"))
+
+ if Options.options.picky_developer:
+ conf.ADD_NAMED_CFLAGS('PICKY_CFLAGS', '-Werror -Wno-error=deprecated-declarations', testflags=True)
+
+ if Options.options.fatal_errors:
+ conf.ADD_CFLAGS('-Wfatal-errors', testflags=True)
+
+ if Options.options.pedantic:
+ conf.ADD_CFLAGS('-W', testflags=True)
+
+ if Options.options.address_sanitizer:
+ conf.ADD_CFLAGS('-fno-omit-frame-pointer -O1 -fsanitize=address', testflags=True)
+ conf.ADD_LDFLAGS('-fsanitize=address', testflags=True)
+ conf.env['ADDRESS_SANITIZER'] = True
+
+
+    # Let people pass additional ADDITIONAL_{CFLAGS,LDFLAGS}
+    # environment variables which are only used for the final build.
+ #
+ # The CFLAGS and LDFLAGS environment variables are also
+ # used for the configure checks which might impact their results.
+ conf.add_os_flags('ADDITIONAL_CFLAGS')
+ if conf.env.ADDITIONAL_CFLAGS and conf.CHECK_CFLAGS(conf.env['ADDITIONAL_CFLAGS']):
+ conf.env['EXTRA_CFLAGS'].extend(conf.env['ADDITIONAL_CFLAGS'])
+ conf.add_os_flags('ADDITIONAL_LDFLAGS')
+ if conf.env.ADDITIONAL_LDFLAGS and conf.CHECK_LDFLAGS(conf.env['ADDITIONAL_LDFLAGS']):
+ conf.env['EXTRA_LDFLAGS'].extend(conf.env['ADDITIONAL_LDFLAGS'])
+
+ if path is None:
+ conf.write_config_header('config.h', top=True)
+ else:
+ conf.write_config_header(path)
+ conf.SAMBA_CROSS_CHECK_COMPLETE()
+
+
+@conf
+def CONFIG_PATH(conf, name, default):
+ '''setup a configurable path'''
+ if not name in conf.env:
+ if default[0] == '/':
+ conf.env[name] = default
+ else:
+ conf.env[name] = conf.env['PREFIX'] + default
+
+@conf
+def ADD_NAMED_CFLAGS(conf, name, flags, testflags=False):
+ '''add some CFLAGS to the command line
+ optionally set testflags to ensure all the flags work
+ '''
+ if testflags:
+ ok_flags=[]
+ for f in flags.split():
+ if CHECK_CFLAGS(conf, f):
+ ok_flags.append(f)
+ flags = ok_flags
+ if not name in conf.env:
+ conf.env[name] = []
+ conf.env[name].extend(TO_LIST(flags))
+
+@conf
+def ADD_CFLAGS(conf, flags, testflags=False):
+ '''add some CFLAGS to the command line
+ optionally set testflags to ensure all the flags work
+ '''
+ ADD_NAMED_CFLAGS(conf, 'EXTRA_CFLAGS', flags, testflags=testflags)
+
+@conf
+def ADD_LDFLAGS(conf, flags, testflags=False):
+ '''add some LDFLAGS to the command line
+ optionally set testflags to ensure all the flags work
+
+ this will return the flags that are added, if any
+ '''
+ if testflags:
+ ok_flags=[]
+ for f in flags.split():
+ if CHECK_LDFLAGS(conf, f):
+ ok_flags.append(f)
+ flags = ok_flags
+ if not 'EXTRA_LDFLAGS' in conf.env:
+ conf.env['EXTRA_LDFLAGS'] = []
+ conf.env['EXTRA_LDFLAGS'].extend(TO_LIST(flags))
+ return flags
+
+
+@conf
+def ADD_EXTRA_INCLUDES(conf, includes):
+ '''add some extra include directories to all builds'''
+ if not 'EXTRA_INCLUDES' in conf.env:
+ conf.env['EXTRA_INCLUDES'] = []
+ conf.env['EXTRA_INCLUDES'].extend(TO_LIST(includes))
+
+
+
+def CURRENT_CFLAGS(bld, target, cflags, allow_warnings=False, hide_symbols=False):
+ '''work out the current flags. local flags are added first'''
+ ret = TO_LIST(cflags)
+    if 'EXTRA_CFLAGS' in bld.env:
+        ret.extend(bld.env['EXTRA_CFLAGS'])
+    if not allow_warnings and 'PICKY_CFLAGS' in bld.env:
+        ret.extend(bld.env['PICKY_CFLAGS'])
+ if hide_symbols and bld.env.HAVE_VISIBILITY_ATTR:
+ ret.append(bld.env.VISIBILITY_CFLAGS)
+ return ret
+
+
+@conf
+def CHECK_CC_ENV(conf):
+    """trim whitespace from 'CC'.
+    The build farm sometimes puts a space at the start"""
+ if os.environ.get('CC'):
+ conf.env.CC = TO_LIST(os.environ.get('CC'))
+ if len(conf.env.CC) == 1:
+ # make for nicer logs if just a single command
+ conf.env.CC = conf.env.CC[0]
+
+
+@conf
+def SETUP_CONFIGURE_CACHE(conf, enable):
+ '''enable/disable cache of configure results'''
+ if enable:
+ # when -C is chosen, we will use a private cache and will
+        # not look into system includes. This roughly matches what
+ # autoconf does with -C
+ cache_path = os.path.join(conf.blddir, '.confcache')
+ mkdir_p(cache_path)
+ Options.cache_global = os.environ['WAFCACHE'] = cache_path
+ else:
+ # when -C is not chosen we will not cache configure checks
+ # We set the recursion limit low to prevent waf from spending
+ # a lot of time on the signatures of the files.
+ Options.cache_global = os.environ['WAFCACHE'] = ''
+ preproc.recursion_limit = 1
+ # in either case we don't need to scan system includes
+ preproc.go_absolute = False
+
+
+@conf
+def SAMBA_CHECK_UNDEFINED_SYMBOL_FLAGS(conf):
+ # we don't want any libraries or modules to rely on runtime
+ # resolution of symbols
+ if not sys.platform.startswith("openbsd"):
+ conf.env.undefined_ldflags = conf.ADD_LDFLAGS('-Wl,-no-undefined', testflags=True)
+
+ if not sys.platform.startswith("openbsd") and conf.env.undefined_ignore_ldflags == []:
+ if conf.CHECK_LDFLAGS(['-undefined', 'dynamic_lookup']):
+ conf.env.undefined_ignore_ldflags = ['-undefined', 'dynamic_lookup']
+
+@conf
+def CHECK_CFG(self, *k, **kw):
+ return self.check_cfg(*k, **kw)
diff --git a/buildtools/wafsamba/samba_autoproto.py b/buildtools/wafsamba/samba_autoproto.py
new file mode 100644
index 0000000..bad627a
--- /dev/null
+++ b/buildtools/wafsamba/samba_autoproto.py
@@ -0,0 +1,23 @@
+# waf build tool for building automatic prototypes from C source
+
+import Build, os
+from samba_utils import *
+
+def SAMBA_AUTOPROTO(bld, header, source):
+ '''rule for samba prototype generation'''
+ bld.SET_BUILD_GROUP('prototypes')
+ relpath = os_path_relpath(bld.curdir, bld.srcnode.abspath())
+ name = os.path.join(relpath, header)
+ SET_TARGET_TYPE(bld, name, 'PROTOTYPE')
+ t = bld(
+ name = name,
+ source = source,
+ target = header,
+ update_outputs=True,
+ ext_out='.c',
+ before ='cc',
+ rule = '${PERL} "${SCRIPT}/mkproto.pl" --srcdir=.. --builddir=. --public=/dev/null --private="${TGT}" ${SRC}'
+ )
+ t.env.SCRIPT = os.path.join(bld.srcnode.abspath(), 'source4/script')
+Build.BuildContext.SAMBA_AUTOPROTO = SAMBA_AUTOPROTO
+
diff --git a/buildtools/wafsamba/samba_bundled.py b/buildtools/wafsamba/samba_bundled.py
new file mode 100644
index 0000000..515590f
--- /dev/null
+++ b/buildtools/wafsamba/samba_bundled.py
@@ -0,0 +1,257 @@
+# functions to support bundled libraries
+
+from Configure import conf
+import Build, Options, sys, Logs
+from samba_utils import *
+
+def PRIVATE_NAME(bld, name, private_extension, private_library):
+ '''possibly rename a library to include a bundled extension'''
+
+ if not private_library:
+ return name
+
+ # we now use the same private name for libraries as the public name.
+ # see http://git.samba.org/?p=tridge/junkcode.git;a=tree;f=shlib for a
+ # demonstration that this is the right thing to do
+ # also see http://lists.samba.org/archive/samba-technical/2011-January/075816.html
+ if private_extension:
+ return name
+
+ extension = bld.env.PRIVATE_EXTENSION
+
+ if extension and name.startswith('%s' % extension):
+ return name
+
+ if extension and name.endswith('%s' % extension):
+ return name
+
+ return "%s-%s" % (name, extension)
+
+
+def target_in_list(target, lst, default):
+ for l in lst:
+ if target == l:
+ return True
+ if '!' + target == l:
+ return False
+ if l == 'ALL':
+ return True
+ if l == 'NONE':
+ return False
+ return default
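+
+# Illustrative semantics: with the list 'talloc !tdb ALL', target 'talloc'
+# matches explicitly (True), 'tdb' is negated (False), and any other target
+# falls through to the 'ALL' catch-all (True).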
+
+
+def BUILTIN_LIBRARY(bld, name):
+ '''return True if a library should be builtin
+ instead of being built as a shared lib'''
+ return target_in_list(name, bld.env.BUILTIN_LIBRARIES, False)
+Build.BuildContext.BUILTIN_LIBRARY = BUILTIN_LIBRARY
+
+
+def BUILTIN_DEFAULT(opt, builtins):
+ '''set a comma separated default list of builtin libraries for this package'''
+ if 'BUILTIN_LIBRARIES_DEFAULT' in Options.options:
+ return
+ Options.options['BUILTIN_LIBRARIES_DEFAULT'] = builtins
+Options.Handler.BUILTIN_DEFAULT = BUILTIN_DEFAULT
+
+
+def PRIVATE_EXTENSION_DEFAULT(opt, extension, noextension=''):
+ '''set a default private library extension'''
+ if 'PRIVATE_EXTENSION_DEFAULT' in Options.options:
+ return
+ Options.options['PRIVATE_EXTENSION_DEFAULT'] = extension
+ Options.options['PRIVATE_EXTENSION_EXCEPTION'] = noextension
+Options.Handler.PRIVATE_EXTENSION_DEFAULT = PRIVATE_EXTENSION_DEFAULT
+
+
+def minimum_library_version(conf, libname, default):
+    '''allow override of minimum system library version'''
+
+ minlist = Options.options.MINIMUM_LIBRARY_VERSION
+ if not minlist:
+ return default
+
+ for m in minlist.split(','):
+ a = m.split(':')
+ if len(a) != 2:
+ Logs.error("Bad syntax for --minimum-library-version of %s" % m)
+ sys.exit(1)
+ if a[0] == libname:
+ return a[1]
+ return default
+
+
+@conf
+def LIB_MAY_BE_BUNDLED(conf, libname):
+ if libname in conf.env.BUNDLED_LIBS:
+ return True
+ if '!%s' % libname in conf.env.BUNDLED_LIBS:
+ return False
+ if 'NONE' in conf.env.BUNDLED_LIBS:
+ return False
+ return True
+
+@conf
+def LIB_MUST_BE_BUNDLED(conf, libname):
+ if libname in conf.env.BUNDLED_LIBS:
+ return True
+ if '!%s' % libname in conf.env.BUNDLED_LIBS:
+ return False
+ if 'ALL' in conf.env.BUNDLED_LIBS:
+ return True
+ return False
+
+@conf
+def LIB_MUST_BE_PRIVATE(conf, libname):
+ return ('ALL' in conf.env.PRIVATE_LIBS or
+ libname in conf.env.PRIVATE_LIBS)
+
+@conf
+def CHECK_PREREQUISITES(conf, prereqs):
+ missing = []
+ for syslib in TO_LIST(prereqs):
+ f = 'FOUND_SYSTEMLIB_%s' % syslib
+ if not f in conf.env:
+ missing.append(syslib)
+ return missing
+
+
+@runonce
+@conf
+def CHECK_BUNDLED_SYSTEM_PKG(conf, libname, minversion='0.0.0',
+ onlyif=None, implied_deps=None, pkg=None):
+ '''check if a library is available as a system library.
+
+ This only tries using pkg-config
+ '''
+ return conf.CHECK_BUNDLED_SYSTEM(libname,
+ minversion=minversion,
+ onlyif=onlyif,
+ implied_deps=implied_deps,
+ pkg=pkg)
+
+@runonce
+@conf
+def CHECK_BUNDLED_SYSTEM(conf, libname, minversion='0.0.0',
+ checkfunctions=None, headers=None, checkcode=None,
+ onlyif=None, implied_deps=None,
+ require_headers=True, pkg=None, set_target=True):
+ '''check if a library is available as a system library.
+ this first tries via pkg-config, then if that fails
+ tries by testing for a specified function in the specified lib
+ '''
+ if conf.LIB_MUST_BE_BUNDLED(libname):
+ return False
+ found = 'FOUND_SYSTEMLIB_%s' % libname
+ if found in conf.env:
+ return conf.env[found]
+
+ def check_functions_headers_code():
+ '''helper function for CHECK_BUNDLED_SYSTEM'''
+ if require_headers and headers and not conf.CHECK_HEADERS(headers, lib=libname):
+ return False
+ if checkfunctions is not None:
+ ok = conf.CHECK_FUNCS_IN(checkfunctions, libname, headers=headers,
+ empty_decl=False, set_target=False)
+ if not ok:
+ return False
+ if checkcode is not None:
+ define='CHECK_BUNDLED_SYSTEM_%s' % libname.upper()
+ ok = conf.CHECK_CODE(checkcode, lib=libname,
+ headers=headers, local_include=False,
+ msg=msg, define=define)
+ conf.CONFIG_RESET(define)
+ if not ok:
+ return False
+ return True
+
+
+ # see if the library should only use a system version if another dependent
+ # system version is found. That prevents possible use of mixed library
+ # versions
+ if onlyif:
+ missing = conf.CHECK_PREREQUISITES(onlyif)
+ if missing:
+ if not conf.LIB_MAY_BE_BUNDLED(libname):
+ Logs.error('ERROR: Use of system library %s depends on missing system library/libraries %r' % (libname, missing))
+ sys.exit(1)
+ conf.env[found] = False
+ return False
+
+ minversion = minimum_library_version(conf, libname, minversion)
+
+ msg = 'Checking for system %s' % libname
+ if minversion != '0.0.0':
+ msg += ' >= %s' % minversion
+
+ uselib_store=libname.upper()
+ if pkg is None:
+ pkg = libname
+
+ # try pkgconfig first
+ if (conf.check_cfg(package=pkg,
+ args='"%s >= %s" --cflags --libs' % (pkg, minversion),
+ msg=msg, uselib_store=uselib_store) and
+ check_functions_headers_code()):
+ if set_target:
+ conf.SET_TARGET_TYPE(libname, 'SYSLIB')
+ conf.env[found] = True
+ if implied_deps:
+ conf.SET_SYSLIB_DEPS(libname, implied_deps)
+ return True
+ if checkfunctions is not None:
+ if check_functions_headers_code():
+ conf.env[found] = True
+ if implied_deps:
+ conf.SET_SYSLIB_DEPS(libname, implied_deps)
+ if set_target:
+ conf.SET_TARGET_TYPE(libname, 'SYSLIB')
+ return True
+ conf.env[found] = False
+ if not conf.LIB_MAY_BE_BUNDLED(libname):
+ Logs.error('ERROR: System library %s of version %s not found, and bundling disabled' % (libname, minversion))
+ sys.exit(1)
+ return False
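+
+# Example (illustrative): prefer a system talloc when it is new enough,
+# falling back to the bundled copy unless bundling has been disabled:
+#
+#   conf.CHECK_BUNDLED_SYSTEM('talloc', minversion='2.0.8',
+#                             implied_deps='replace')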
+
+
+def tuplize_version(version):
+ return tuple([int(x) for x in version.split(".")])
+
+@runonce
+@conf
+def CHECK_BUNDLED_SYSTEM_PYTHON(conf, libname, modulename, minversion='0.0.0'):
+ '''check if a python module is available on the system and
+ has the specified minimum version.
+ '''
+ if conf.LIB_MUST_BE_BUNDLED(libname):
+ return False
+
+ # see if the library should only use a system version if another dependent
+ # system version is found. That prevents possible use of mixed library
+ # versions
+ minversion = minimum_library_version(conf, libname, minversion)
+
+ try:
+ m = __import__(modulename)
+ except ImportError:
+ found = False
+ else:
+ try:
+ version = m.__version__
+ except AttributeError:
+ found = False
+ else:
+ found = tuplize_version(version) >= tuplize_version(minversion)
+ if not found and not conf.LIB_MAY_BE_BUNDLED(libname):
+ Logs.error('ERROR: Python module %s of version %s not found, and bundling disabled' % (libname, minversion))
+ sys.exit(1)
+ return found
+
+
+def NONSHARED_BINARY(bld, name):
+ '''return True if a binary should be built without non-system shared libs'''
+ return target_in_list(name, bld.env.NONSHARED_BINARIES, False)
+Build.BuildContext.NONSHARED_BINARY = NONSHARED_BINARY
+
+
diff --git a/buildtools/wafsamba/samba_conftests.py b/buildtools/wafsamba/samba_conftests.py
new file mode 100644
index 0000000..96fead5
--- /dev/null
+++ b/buildtools/wafsamba/samba_conftests.py
@@ -0,0 +1,589 @@
+# a set of config tests that use the samba_autoconf functions
+# to test for commonly needed configuration options
+
+import os, shutil, re, shlex
+import Build, Configure, Utils, Options, Logs
+from Configure import conf
+import config_c
+from samba_utils import *
+
+
+def add_option(self, *k, **kw):
+ '''syntax help: provide the "match" attribute to opt.add_option() so that folders can be added to specific config tests'''
+ match = kw.get('match', [])
+ if match:
+ del kw['match']
+ opt = self.parser.add_option(*k, **kw)
+ opt.match = match
+ return opt
+Options.Handler.add_option = add_option
+
+@conf
+def check(self, *k, **kw):
+ '''Override the waf defaults to inject --with-directory options'''
+
+ if not 'env' in kw:
+ kw['env'] = self.env.copy()
+
+    # match the configuration test with specific options, for example:
+ # --with-libiconv -> Options.options.iconv_open -> "Checking for library iconv"
+ additional_dirs = []
+ if 'msg' in kw:
+ msg = kw['msg']
+ for x in Options.Handler.parser.parser.option_list:
+ if getattr(x, 'match', None) and msg in x.match:
+ d = getattr(Options.options, x.dest, '')
+ if d:
+ additional_dirs.append(d)
+
+    # we add the additional dirs twice: once for the test data, and again if the compilation test succeeds below
+ def add_options_dir(dirs, env):
+ for x in dirs:
+ if not x in env.CPPPATH:
+ env.CPPPATH = [os.path.join(x, 'include')] + env.CPPPATH
+ if not x in env.LIBPATH:
+ env.LIBPATH = [os.path.join(x, 'lib')] + env.LIBPATH
+
+ add_options_dir(additional_dirs, kw['env'])
+
+ self.validate_c(kw)
+ self.check_message_1(kw['msg'])
+ ret = None
+ try:
+ ret = self.run_c_code(*k, **kw)
+ except Configure.ConfigurationError, e:
+ self.check_message_2(kw['errmsg'], 'YELLOW')
+ if 'mandatory' in kw and kw['mandatory']:
+ if Logs.verbose > 1:
+ raise
+ else:
+ self.fatal('the configuration failed (see %r)' % self.log.name)
+ else:
+ kw['success'] = ret
+ self.check_message_2(self.ret_msg(kw['okmsg'], kw))
+
+ # success! keep the CPPPATH/LIBPATH
+ add_options_dir(additional_dirs, self.env)
+
+ self.post_check(*k, **kw)
+ if not kw.get('execute', False):
+ return ret == 0
+ return ret
+
+
+@conf
+def CHECK_ICONV(conf, define='HAVE_NATIVE_ICONV'):
+ '''check if the iconv library is installed
+ optionally pass a define'''
+ if conf.CHECK_FUNCS_IN('iconv_open', 'iconv', checklibc=True, headers='iconv.h'):
+ conf.DEFINE(define, 1)
+ return True
+ return False
+
+
+@conf
+def CHECK_LARGEFILE(conf, define='HAVE_LARGEFILE'):
+ '''see what we need for largefile support'''
+    getconf_cflags = conf.CHECK_COMMAND(['getconf', 'LFS_CFLAGS'])
+ if getconf_cflags is not False:
+ if (conf.CHECK_CODE('return !(sizeof(off_t) >= 8)',
+ define='WORKING_GETCONF_LFS_CFLAGS',
+ execute=True,
+ cflags=getconf_cflags,
+ msg='Checking getconf large file support flags work')):
+ conf.ADD_CFLAGS(getconf_cflags)
+ getconf_cflags_list=TO_LIST(getconf_cflags)
+ for flag in getconf_cflags_list:
+ if flag[:2] == "-D":
+ flag_split = flag[2:].split('=')
+ if len(flag_split) == 1:
+ conf.DEFINE(flag_split[0], '1')
+ else:
+ conf.DEFINE(flag_split[0], flag_split[1])
+
+ if conf.CHECK_CODE('return !(sizeof(off_t) >= 8)',
+ define,
+ execute=True,
+ msg='Checking for large file support without additional flags'):
+ return True
+
+ if conf.CHECK_CODE('return !(sizeof(off_t) >= 8)',
+ define,
+ execute=True,
+ cflags='-D_FILE_OFFSET_BITS=64',
+ msg='Checking for -D_FILE_OFFSET_BITS=64'):
+ conf.DEFINE('_FILE_OFFSET_BITS', 64)
+ return True
+
+ if conf.CHECK_CODE('return !(sizeof(off_t) >= 8)',
+ define,
+ execute=True,
+ cflags='-D_LARGE_FILES',
+ msg='Checking for -D_LARGE_FILES'):
+ conf.DEFINE('_LARGE_FILES', 1)
+ return True
+ return False
+
+
+@conf
+def CHECK_C_PROTOTYPE(conf, function, prototype, define, headers=None, msg=None):
+ '''verify that a C prototype matches the one on the current system'''
+ if not conf.CHECK_DECLS(function, headers=headers):
+ return False
+ if not msg:
+ msg = 'Checking C prototype for %s' % function
+ return conf.CHECK_CODE('%s; void *_x = (void *)%s' % (prototype, function),
+ define=define,
+ local_include=False,
+ headers=headers,
+ link=False,
+ execute=False,
+ msg=msg)
+
+
+@conf
+def CHECK_CHARSET_EXISTS(conf, charset, outcharset='UCS-2LE', headers=None, define=None):
+ '''check that a named charset is able to be used with iconv_open() for conversion
+ to a target charset
+ '''
+    msg = 'Checking if we can convert from %s to %s' % (charset, outcharset)
+ if define is None:
+ define = 'HAVE_CHARSET_%s' % charset.upper().replace('-','_')
+ return conf.CHECK_CODE('''
+ iconv_t cd = iconv_open("%s", "%s");
+ if (cd == 0 || cd == (iconv_t)-1) return -1;
+ ''' % (charset, outcharset),
+ define=define,
+ execute=True,
+ msg=msg,
+ lib='iconv',
+ headers=headers)
+
+def find_config_dir(conf):
+ '''find a directory to run tests in'''
+ k = 0
+ while k < 10000:
+ dir = os.path.join(conf.blddir, '.conf_check_%d' % k)
+ try:
+ shutil.rmtree(dir)
+ except OSError:
+ pass
+ try:
+ os.stat(dir)
+ except:
+ break
+ k += 1
+
+ try:
+ os.makedirs(dir)
+ except:
+ conf.fatal('cannot create a configuration test folder %r' % dir)
+
+ try:
+ os.stat(dir)
+ except:
+ conf.fatal('cannot use the configuration test folder %r' % dir)
+ return dir
+
+@conf
+def CHECK_SHLIB_INTRASINC_NAME_FLAGS(conf, msg):
+ '''
+ check if the waf default flags for setting the name of lib
+ are ok
+ '''
+
+ snip = '''
+int foo(int v) {
+ return v * 2;
+}
+'''
+ return conf.check(features='cc cshlib',vnum="1",fragment=snip,msg=msg)
+
+@conf
+def CHECK_NEED_LC(conf, msg):
+ '''check if we need -lc'''
+
+ dir = find_config_dir(conf)
+
+ env = conf.env
+
+ bdir = os.path.join(dir, 'testbuild2')
+ if not os.path.exists(bdir):
+ os.makedirs(bdir)
+
+
+ subdir = os.path.join(dir, "liblctest")
+
+ os.makedirs(subdir)
+
+ dest = open(os.path.join(subdir, 'liblc1.c'), 'w')
+ dest.write('#include <stdio.h>\nint lib_func(void) { FILE *f = fopen("foo", "r");}\n')
+ dest.close()
+
+ bld = Build.BuildContext()
+ bld.log = conf.log
+ bld.all_envs.update(conf.all_envs)
+ bld.all_envs['default'] = env
+ bld.lst_variants = bld.all_envs.keys()
+ bld.load_dirs(dir, bdir)
+
+ bld.rescan(bld.srcnode)
+
+ bld(features='cc cshlib',
+ source='liblctest/liblc1.c',
+ ldflags=conf.env['EXTRA_LDFLAGS'],
+ target='liblc',
+ name='liblc')
+
+ try:
+ bld.compile()
+ conf.check_message(msg, '', True)
+ return True
+ except:
+ conf.check_message(msg, '', False)
+ return False
+
+
+@conf
+def CHECK_SHLIB_W_PYTHON(conf, msg):
+ '''check if we need -undefined dynamic_lookup'''
+
+ dir = find_config_dir(conf)
+
+ env = conf.env
+
+ snip = '''
+#include <Python.h>
+#include <crt_externs.h>
+#define environ (*_NSGetEnviron())
+
+static PyObject *ldb_module = NULL;
+int foo(int v) {
+ extern char **environ;
+ environ[0] = 1;
+ ldb_module = PyImport_ImportModule("ldb");
+ return v * 2;
+}'''
+ return conf.check(features='cc cshlib',uselib='PYEMBED',fragment=snip,msg=msg)
+
+# this one is quite complex, and should probably be broken up
+# into several parts. I'd quite like to create a set of CHECK_COMPOUND()
+# functions that make writing complex compound tests like this much easier
+@conf
+def CHECK_LIBRARY_SUPPORT(conf, rpath=False, version_script=False, msg=None):
+ '''see if the platform supports building libraries'''
+
+ if msg is None:
+ if rpath:
+ msg = "rpath library support"
+ else:
+ msg = "building library support"
+
+ dir = find_config_dir(conf)
+
+ bdir = os.path.join(dir, 'testbuild')
+ if not os.path.exists(bdir):
+ os.makedirs(bdir)
+
+ env = conf.env
+
+ subdir = os.path.join(dir, "libdir")
+
+ os.makedirs(subdir)
+
+ dest = open(os.path.join(subdir, 'lib1.c'), 'w')
+ dest.write('int lib_func(void) { return 42; }\n')
+ dest.close()
+
+ dest = open(os.path.join(dir, 'main.c'), 'w')
+ dest.write('int main(void) {return !(lib_func() == 42);}\n')
+ dest.close()
+
+ bld = Build.BuildContext()
+ bld.log = conf.log
+ bld.all_envs.update(conf.all_envs)
+ bld.all_envs['default'] = env
+ bld.lst_variants = bld.all_envs.keys()
+ bld.load_dirs(dir, bdir)
+
+ bld.rescan(bld.srcnode)
+
+ ldflags = []
+ if version_script:
+ ldflags.append("-Wl,--version-script=%s/vscript" % bld.path.abspath())
+ dest = open(os.path.join(dir,'vscript'), 'w')
+ dest.write('TEST_1.0A2 { global: *; };\n')
+ dest.close()
+
+ bld(features='cc cshlib',
+ source='libdir/lib1.c',
+ target='libdir/lib1',
+ ldflags=ldflags,
+ name='lib1')
+
+ o = bld(features='cc cprogram',
+ source='main.c',
+ target='prog1',
+ uselib_local='lib1')
+
+ if rpath:
+ o.rpath=os.path.join(bdir, 'default/libdir')
+
+ # compile the program
+ try:
+ bld.compile()
+ except:
+ conf.check_message(msg, '', False)
+ return False
+
+ # path for execution
+ lastprog = o.link_task.outputs[0].abspath(env)
+
+ if not rpath:
+ if 'LD_LIBRARY_PATH' in os.environ:
+ old_ld_library_path = os.environ['LD_LIBRARY_PATH']
+ else:
+ old_ld_library_path = None
+ ADD_LD_LIBRARY_PATH(os.path.join(bdir, 'default/libdir'))
+
+ # we need to run the program, try to get its result
+ args = conf.SAMBA_CROSS_ARGS(msg=msg)
+ proc = Utils.pproc.Popen([lastprog] + args, stdout=Utils.pproc.PIPE, stderr=Utils.pproc.PIPE)
+ (out, err) = proc.communicate()
+ w = conf.log.write
+ w(str(out))
+ w('\n')
+ w(str(err))
+ w('\nreturncode %r\n' % proc.returncode)
+ ret = (proc.returncode == 0)
+
+ if not rpath:
+ os.environ['LD_LIBRARY_PATH'] = old_ld_library_path or ''
+
+ conf.check_message(msg, '', ret)
+ return ret
+
+
+
+@conf
+def CHECK_PERL_MANPAGE(conf, msg=None, section=None):
+ '''work out what extension perl uses for manpages'''
+
+ if msg is None:
+ if section:
+ msg = "perl man%s extension" % section
+ else:
+ msg = "perl manpage generation"
+
+ conf.check_message_1(msg)
+
+ dir = find_config_dir(conf)
+
+ bdir = os.path.join(dir, 'testbuild')
+ if not os.path.exists(bdir):
+ os.makedirs(bdir)
+
+ dest = open(os.path.join(bdir, 'Makefile.PL'), 'w')
+ dest.write("""
+use ExtUtils::MakeMaker;
+WriteMakefile(
+ 'NAME' => 'WafTest',
+ 'EXE_FILES' => [ 'WafTest' ]
+);
+""")
+ dest.close()
+ back = os.path.abspath('.')
+ os.chdir(bdir)
+ proc = Utils.pproc.Popen(['perl', 'Makefile.PL'],
+ stdout=Utils.pproc.PIPE,
+ stderr=Utils.pproc.PIPE)
+ (out, err) = proc.communicate()
+ os.chdir(back)
+
+ ret = (proc.returncode == 0)
+ if not ret:
+ conf.check_message_2('not found', color='YELLOW')
+ return
+
+ if section:
+ f = open(os.path.join(bdir,'Makefile'), 'r')
+ man = f.read()
+ f.close()
+ m = re.search('MAN%sEXT\s+=\s+(\w+)' % section, man)
+ if not m:
+ conf.check_message_2('not found', color='YELLOW')
+ return
+ ext = m.group(1)
+ conf.check_message_2(ext)
+ return ext
+
+ conf.check_message_2('ok')
+ return True
+
+
+@conf
+def CHECK_COMMAND(conf, cmd, msg=None, define=None, on_target=True, boolean=False):
+ '''run a command and return result'''
+ if msg is None:
+ msg = 'Checking %s' % ' '.join(cmd)
+ conf.COMPOUND_START(msg)
+ cmd = cmd[:]
+ if on_target:
+ cmd.extend(conf.SAMBA_CROSS_ARGS(msg=msg))
+ try:
+ ret = Utils.cmd_output(cmd)
+ except:
+ conf.COMPOUND_END(False)
+ return False
+ if boolean:
+ conf.COMPOUND_END('ok')
+ if define:
+ conf.DEFINE(define, '1')
+ else:
+ ret = ret.strip()
+ conf.COMPOUND_END(ret)
+ if define:
+ conf.DEFINE(define, ret, quote=True)
+ return ret
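+# Illustrative usage (command and define name are hypothetical):
+#   conf.CHECK_COMMAND(['getconf', 'LFS_CFLAGS'],
+#                      define='LFS_CFLAGS_STR', on_target=False)
+# stores the stripped stdout of the command in the LFS_CFLAGS_STR define,
+# or returns False if the command fails.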
+
+
+@conf
+def CHECK_UNAME(conf):
+ '''setup SYSTEM_UNAME_* defines'''
+ ret = True
+ for v in "sysname machine release version".split():
+ if not conf.CHECK_CODE('''
+ struct utsname n;
+ if (uname(&n) == -1) return -1;
+ printf("%%s", n.%s);
+ ''' % v,
+ define='SYSTEM_UNAME_%s' % v.upper(),
+ execute=True,
+ define_ret=True,
+ quote=True,
+ headers='sys/utsname.h',
+ local_include=False,
+ msg="Checking uname %s type" % v):
+ ret = False
+ return ret
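+# On a typical Linux host this produces defines such as
+#   SYSTEM_UNAME_SYSNAME "Linux"
+#   SYSTEM_UNAME_MACHINE "x86_64"
+# (values are illustrative; they come from running uname on the target).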
+
+@conf
+def CHECK_INLINE(conf):
+ '''check for the right value for inline'''
+ conf.COMPOUND_START('Checking for inline')
+ for i in ['inline', '__inline__', '__inline']:
+ ret = conf.CHECK_CODE('''
+ typedef int foo_t;
+ static %s foo_t static_foo () {return 0; }
+ %s foo_t foo () {return 0; }''' % (i, i),
+ define='INLINE_MACRO',
+ addmain=False,
+ link=False)
+ if ret:
+ if i != 'inline':
+ conf.DEFINE('inline', i, quote=False)
+ break
+ if not ret:
+ conf.COMPOUND_END(ret)
+ else:
+ conf.COMPOUND_END(i)
+ return ret
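+# If the compiler only accepts an alternative spelling, 'inline' is
+# DEFINEd to that spelling (e.g. __inline__); with a compiler that
+# supports plain 'inline', no extra define is emitted.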
+
+@conf
+def CHECK_XSLTPROC_MANPAGES(conf):
+ '''check if xsltproc can run with the given stylesheets'''
+
+ if not conf.CONFIG_SET('XSLTPROC'):
+ conf.find_program('xsltproc', var='XSLTPROC')
+ if not conf.CONFIG_SET('XSLTPROC'):
+ return False
+
+ s='http://docbook.sourceforge.net/release/xsl/current/manpages/docbook.xsl'
+ conf.CHECK_COMMAND('%s --nonet %s 2> /dev/null' % (conf.env.XSLTPROC, s),
+ msg='Checking for stylesheet %s' % s,
+ define='XSLTPROC_MANPAGES', on_target=False,
+ boolean=True)
+ if not conf.CONFIG_SET('XSLTPROC_MANPAGES'):
+ print "A local copy of the docbook.xsl wasn't found on your system" \
+ " consider installing package like docbook-xsl"
+
+#
+# Determine the standard libpath for the used compiler,
+# so we can later use that to filter out these standard
+# library paths when some tools like cups-config or
+# python-config report standard lib paths with their
+# ldflags (-L...)
+#
+@conf
+def CHECK_STANDARD_LIBPATH(conf):
+ # at least gcc and clang support this:
+ try:
+ cmd = conf.env.CC + ['-print-search-dirs']
+ out = Utils.cmd_output(cmd).split('\n')
+ except ValueError:
+ # option not supported by compiler - use a standard list of directories
+ dirlist = [ '/usr/lib', '/usr/lib64' ]
+ except:
+ raise Utils.WafError('Unexpected error running "%s"' % (cmd))
+ else:
+ dirlist = []
+ for line in out:
+ line = line.strip()
+ if line.startswith("libraries: ="):
+ dirliststr = line[len("libraries: ="):]
+ dirlist = [ os.path.normpath(x) for x in dirliststr.split(':') ]
+ break
+
+ conf.env.STANDARD_LIBPATH = dirlist
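+# Sketch of the compiler output parsed above (paths are illustrative):
+#   $ gcc -print-search-dirs
+#   install: /usr/lib/gcc/x86_64-linux-gnu/4.9/
+#   programs: =/usr/lib/gcc/x86_64-linux-gnu/4.9/:...
+#   libraries: =/usr/lib/gcc/x86_64-linux-gnu/4.9/:/usr/lib/x86_64-linux-gnu/:/usr/lib/
+# Only the 'libraries: =' line is used, split on ':' and normalised.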
+
+
+waf_config_c_parse_flags = config_c.parse_flags
+def samba_config_c_parse_flags(line1, uselib, env):
+ #
+ # We do a special treatment of the rpath components
+ # in the linkflags line, because currently the upstream
+ # parse_flags function is incomplete with respect to
+ # treatment of the rpath. The remainder of the linkflags
+ # line is later passed to the original function.
+ #
+ lst1 = shlex.split(line1)
+ lst2 = []
+ while lst1:
+ x = lst1.pop(0)
+
+ #
+ # NOTE on special treatment of -Wl,-R and -Wl,-rpath:
+ #
+ # It is important not to put a library-provided RPATH
+ # into the LINKFLAGS but in the RPATH instead, since
+ # the provided LINKFLAGS get prepended to our own internal
+ # RPATH later, and hence can potentially lead to linking
+ # in too old versions of our internal libs.
+ #
+ # We do this filtering here on our own because of some
+ # bugs in the real parse_flags() function.
+ #
+ if x == '-Wl,-rpath' or x == '-Wl,-R':
+ x = lst1.pop(0)
+ if x.startswith('-Wl,'):
+ rpath = x[4:]
+ else:
+ rpath = x
+ elif x.startswith('-Wl,-R,'):
+ rpath = x[7:]
+ elif x.startswith('-Wl,-R'):
+ rpath = x[6:]
+ elif x.startswith('-Wl,-rpath,'):
+ rpath = x[11:]
+ else:
+ lst2.append(x)
+ continue
+
+ env.append_value('RPATH_' + uselib, rpath)
+
+ line2 = ' '.join(lst2)
+ waf_config_c_parse_flags(line2, uselib, env)
+
+ return
+config_c.parse_flags = samba_config_c_parse_flags
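+# Illustrative effect (flags and uselib name are hypothetical): parsing
+#   '-L/opt/foo/lib -Wl,-rpath,/opt/foo/lib -lfoo'
+# for uselib 'FOO' appends '/opt/foo/lib' to env['RPATH_FOO'] and hands
+# only '-L/opt/foo/lib -lfoo' on to the original parse_flags.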
diff --git a/buildtools/wafsamba/samba_cross.py b/buildtools/wafsamba/samba_cross.py
new file mode 100644
index 0000000..3838e34
--- /dev/null
+++ b/buildtools/wafsamba/samba_cross.py
@@ -0,0 +1,134 @@
+# functions for handling cross-compilation
+
+import Utils, Logs, sys, os, Options, re
+from Configure import conf
+
+real_Popen = None
+
+ANSWER_UNKNOWN = (254, "")
+ANSWER_FAIL = (255, "")
+ANSWER_OK = (0, "")
+
+cross_answers_incomplete = False
+
+
+def add_answer(ca_file, msg, answer):
+ '''add an answer to a set of cross answers'''
+ try:
+ f = open(ca_file, 'a')
+ except:
+ Logs.error("Unable to open cross-answers file %s" % ca_file)
+ sys.exit(1)
+ if answer == ANSWER_OK:
+ f.write('%s: OK\n' % msg)
+ elif answer == ANSWER_UNKNOWN:
+ f.write('%s: UNKNOWN\n' % msg)
+ elif answer == ANSWER_FAIL:
+ f.write('%s: FAIL\n' % msg)
+ else:
+ (retcode, retstring) = answer
+ f.write('%s: (%d, "%s")\n' % (msg, retcode, retstring))
+ f.close()
+
+
+def cross_answer(ca_file, msg):
+ '''return a (retcode,retstring) tuple from an answers file'''
+ try:
+ f = open(ca_file, 'r')
+ except:
+ add_answer(ca_file, msg, ANSWER_UNKNOWN)
+ return ANSWER_UNKNOWN
+ for line in f:
+ line = line.strip()
+ if line == '' or line[0] == '#':
+ continue
+ if line.find(':') != -1:
+ a = line.split(':')
+ thismsg = a[0].strip()
+ if thismsg != msg:
+ continue
+ ans = a[1].strip()
+ if ans == "OK" or ans == "YES":
+ f.close()
+ return ANSWER_OK
+ elif ans == "UNKNOWN":
+ f.close()
+ return ANSWER_UNKNOWN
+ elif ans == "FAIL" or ans == "NO":
+ f.close()
+ return ANSWER_FAIL
+ elif ans[0] == '"':
+ return (0, ans.strip('"'))
+ elif ans[0] == "'":
+ return (0, ans.strip("'"))
+ else:
+ m = re.match('\(\s*(-?\d+)\s*,\s*\"(.*)\"\s*\)', ans)
+ if m:
+ f.close()
+ return (int(m.group(1)), m.group(2))
+ else:
+ raise Utils.WafError("Bad answer format '%s' in %s" % (line, ca_file))
+ f.close()
+ add_answer(ca_file, msg, ANSWER_UNKNOWN)
+ return ANSWER_UNKNOWN
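+# Sketch of a cross-answers file in the format parsed above (messages
+# and values are illustrative):
+#
+#   Checking simple C program: OK
+#   rpath library support: FAIL
+#   Checking uname sysname type: "Linux"
+#   Checking large file support: (0, "")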
+
+
+class cross_Popen(Utils.pproc.Popen):
+ '''cross-compilation wrapper for Popen'''
+ def __init__(*k, **kw):
+ (obj, args) = k
+
+ if '--cross-execute' in args:
+ # when --cross-execute is set, then change the arguments
+ # to use the cross emulator
+ i = args.index('--cross-execute')
+ newargs = args[i+1].split()
+ newargs.extend(args[0:i])
+ args = newargs
+ elif '--cross-answers' in args:
+ # when --cross-answers is set, then change the arguments
+ # to use the cross answers if available
+ i = args.index('--cross-answers')
+ ca_file = args[i+1]
+ msg = args[i+2]
+ ans = cross_answer(ca_file, msg)
+ if ans == ANSWER_UNKNOWN:
+ global cross_answers_incomplete
+ cross_answers_incomplete = True
+ (retcode, retstring) = ans
+ args = ['/bin/sh', '-c', "echo -n '%s'; exit %d" % (retstring, retcode)]
+ real_Popen.__init__(*(obj, args), **kw)
+
+
+@conf
+def SAMBA_CROSS_ARGS(conf, msg=None):
+ '''get exec_args to pass when running cross compiled binaries'''
+ if not conf.env.CROSS_COMPILE:
+ return []
+
+ global real_Popen
+ if real_Popen is None:
+ real_Popen = Utils.pproc.Popen
+ Utils.pproc.Popen = cross_Popen
+
+ ret = []
+
+ if conf.env.CROSS_EXECUTE:
+ ret.extend(['--cross-execute', conf.env.CROSS_EXECUTE])
+ elif conf.env.CROSS_ANSWERS:
+ if msg is None:
+ raise Utils.WafError("Cannot have NULL msg in cross-answers")
+ ret.extend(['--cross-answers', os.path.join(Options.launch_dir, conf.env.CROSS_ANSWERS), msg])
+
+ if ret == []:
+ raise Utils.WafError("Cannot cross-compile without either --cross-execute or --cross-answers")
+
+ return ret
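+# The returned arguments are appended to the test binary's command line,
+# as in the configure tests elsewhere in wafsamba:
+#   args = conf.SAMBA_CROSS_ARGS(msg=msg)
+#   proc = Utils.pproc.Popen([lastprog] + args, ...)
+# cross_Popen above then intercepts --cross-execute/--cross-answers.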
+
+@conf
+def SAMBA_CROSS_CHECK_COMPLETE(conf):
+ '''check if we have some unanswered questions'''
+ global cross_answers_incomplete
+ if conf.env.CROSS_COMPILE and cross_answers_incomplete:
+ raise Utils.WafError("Cross answers file %s is incomplete" % conf.env.CROSS_ANSWERS)
+ return True
diff --git a/buildtools/wafsamba/samba_deps.py b/buildtools/wafsamba/samba_deps.py
new file mode 100644
index 0000000..3be9956
--- /dev/null
+++ b/buildtools/wafsamba/samba_deps.py
@@ -0,0 +1,1188 @@
+# Samba automatic dependency handling and project rules
+
+import Build, os, sys, re, Environment, Logs, time
+from samba_utils import *
+from samba_autoconf import *
+from samba_bundled import BUILTIN_LIBRARY
+
+@conf
+def ADD_GLOBAL_DEPENDENCY(ctx, dep):
+ '''add a dependency for all binaries and libraries'''
+ if not 'GLOBAL_DEPENDENCIES' in ctx.env:
+ ctx.env.GLOBAL_DEPENDENCIES = []
+ ctx.env.GLOBAL_DEPENDENCIES.append(dep)
+
+
+@conf
+def BREAK_CIRCULAR_LIBRARY_DEPENDENCIES(ctx):
+ '''indicate that circular dependencies between libraries should be broken.'''
+ ctx.env.ALLOW_CIRCULAR_LIB_DEPENDENCIES = True
+
+
+@conf
+def SET_SYSLIB_DEPS(conf, target, deps):
+ '''setup some implied dependencies for a SYSLIB'''
+ cache = LOCAL_CACHE(conf, 'SYSLIB_DEPS')
+ cache[target] = deps
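+# Illustrative usage (library names are hypothetical):
+#   conf.SET_SYSLIB_DEPS('ldap', 'lber')
+# records that targets linking the ldap SYSLIB also need lber; the
+# implied dependencies are expanded in build_direct_deps() below.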
+
+
+def expand_subsystem_deps(bld):
+ '''expand the reverse dependencies resulting from subsystem
+ attributes of modules. This walks the complete list of declared
+ subsystems and expands the samba_deps_extended list for any
+ module<->subsystem dependencies'''
+
+ subsystem_list = LOCAL_CACHE(bld, 'INIT_FUNCTIONS')
+ targets = LOCAL_CACHE(bld, 'TARGET_TYPE')
+
+ for subsystem_name in subsystem_list:
+ bld.ASSERT(subsystem_name in targets, "Subsystem target %s not declared" % subsystem_name)
+ type = targets[subsystem_name]
+ if type == 'DISABLED' or type == 'EMPTY':
+ continue
+
+ # for example,
+ # subsystem_name = dcerpc_server (a subsystem)
+ # subsystem = dcerpc_server (a subsystem object)
+ # module_name = rpc_epmapper (a module within the dcerpc_server subsystem)
+ # module = rpc_epmapper (a module object within the dcerpc_server subsystem)
+
+ subsystem = bld.name_to_obj(subsystem_name, bld.env)
+ bld.ASSERT(subsystem is not None, "Unable to find subsystem %s" % subsystem_name)
+ for d in subsystem_list[subsystem_name]:
+ module_name = d['TARGET']
+ module_type = targets[module_name]
+ if module_type in ['DISABLED', 'EMPTY']:
+ continue
+ bld.ASSERT(subsystem is not None,
+ "Subsystem target %s for %s (%s) not found" % (subsystem_name, module_name, module_type))
+ if module_type in ['SUBSYSTEM']:
+ # if a module is a plain object type (not a library) then the
+ # subsystem it is part of needs to have it as a dependency, so targets
+ # that depend on this subsystem get the modules of that subsystem
+ subsystem.samba_deps_extended.append(module_name)
+ subsystem.samba_deps_extended = unique_list(subsystem.samba_deps_extended)
+
+
+
+def build_dependencies(self):
+ '''This builds the dependency list for a target. It runs after all the targets are declared
+
+ The reason this is not just done in the SAMBA_*() rules is that we have no way of knowing
+ the full dependency list for a target until we have all of the targets declared.
+ '''
+
+ if self.samba_type in ['LIBRARY', 'BINARY', 'PYTHON']:
+ self.uselib = list(self.final_syslibs)
+ self.uselib_local = list(self.final_libs)
+ self.add_objects = list(self.final_objects)
+
+ # extra link flags from pkg_config
+ libs = self.final_syslibs.copy()
+
+ (ccflags, ldflags, cpppath) = library_flags(self, list(libs))
+ new_ldflags = getattr(self, 'samba_ldflags', [])[:]
+ new_ldflags.extend(ldflags)
+ self.ldflags = new_ldflags
+
+ if getattr(self, 'allow_undefined_symbols', False) and self.env.undefined_ldflags:
+ for f in self.env.undefined_ldflags:
+ self.ldflags.remove(f)
+
+ if getattr(self, 'allow_undefined_symbols', False) and self.env.undefined_ignore_ldflags:
+ for f in self.env.undefined_ignore_ldflags:
+ self.ldflags.append(f)
+
+ debug('deps: computed dependencies for target %s: uselib=%s uselib_local=%s add_objects=%s',
+ self.sname, self.uselib, self.uselib_local, self.add_objects)
+
+ if self.samba_type in ['SUBSYSTEM']:
+ # this is needed for the ccflags of libs that come from pkg_config
+ self.uselib = list(self.final_syslibs)
+ self.uselib.extend(list(self.direct_syslibs))
+ for lib in self.final_libs:
+ t = self.bld.name_to_obj(lib, self.bld.env)
+ self.uselib.extend(list(t.final_syslibs))
+ self.uselib = unique_list(self.uselib)
+
+ if getattr(self, 'uselib', None):
+ up_list = []
+ for l in self.uselib:
+ up_list.append(l.upper())
+ self.uselib = up_list
+
+
+def build_includes(self):
+ '''This builds the right set of includes for a target.
+
+ One tricky part of this is that the includes= attribute for a
+ target needs to use paths which are relative to that target's
+ declaration directory (which we can get at via t.path).
+
+ The way this works is the includes list gets added as
+ samba_includes in the main build task declaration. Then this
+ function runs after all of the tasks are declared, and it
+ processes the samba_includes attribute to produce an includes=
+ attribute
+ '''
+
+ if getattr(self, 'samba_includes', None) is None:
+ return
+
+ bld = self.bld
+
+ inc_deps = includes_objects(bld, self, set(), {})
+
+ includes = []
+
+ # maybe add local includes
+ if getattr(self, 'local_include', True) and getattr(self, 'local_include_first', True):
+ includes.append('.')
+
+ includes.extend(self.samba_includes_extended)
+
+ if 'EXTRA_INCLUDES' in bld.env and getattr(self, 'global_include', True):
+ includes.extend(bld.env['EXTRA_INCLUDES'])
+
+ includes.append('#')
+
+ inc_set = set()
+ inc_abs = []
+
+ for d in inc_deps:
+ t = bld.name_to_obj(d, bld.env)
+ bld.ASSERT(t is not None, "Unable to find dependency %s for %s" % (d, self.sname))
+ inclist = getattr(t, 'samba_includes_extended', [])[:]
+ if getattr(t, 'local_include', True):
+ inclist.append('.')
+ if inclist == []:
+ continue
+ tpath = t.samba_abspath
+ for inc in inclist:
+ npath = tpath + '/' + inc
+ if not npath in inc_set:
+ inc_abs.append(npath)
+ inc_set.add(npath)
+
+ mypath = self.path.abspath(bld.env)
+ for inc in inc_abs:
+ relpath = os_path_relpath(inc, mypath)
+ includes.append(relpath)
+
+ if getattr(self, 'local_include', True) and not getattr(self, 'local_include_first', True):
+ includes.append('.')
+
+ # now transform the includes list to be relative to the top directory
+ # which is represented by '#' in waf. This allows waf to cache the
+ # includes lists more efficiently
+ includes_top = []
+ for i in includes:
+ if i[0] == '#':
+ # some are already top based
+ includes_top.append(i)
+ continue
+ absinc = os.path.join(self.path.abspath(), i)
+ relinc = os_path_relpath(absinc, self.bld.srcnode.abspath())
+ includes_top.append('#' + relinc)
+
+ self.includes = unique_list(includes_top)
+ debug('deps: includes for target %s: includes=%s',
+ self.sname, self.includes)
+
+
+def add_init_functions(self):
+ '''This builds the right set of init functions'''
+
+ bld = self.bld
+
+ subsystems = LOCAL_CACHE(bld, 'INIT_FUNCTIONS')
+
+ # cope with the separated object lists from BINARY and LIBRARY targets
+ sname = self.sname
+ if sname.endswith('.objlist'):
+ sname = sname[0:-8]
+
+ modules = []
+ if sname in subsystems:
+ modules.append(sname)
+
+ m = getattr(self, 'samba_modules', None)
+ if m is not None:
+ modules.extend(TO_LIST(m))
+
+ m = getattr(self, 'samba_subsystem', None)
+ if m is not None:
+ modules.append(m)
+
+ sentinel = getattr(self, 'init_function_sentinel', 'NULL')
+
+ targets = LOCAL_CACHE(bld, 'TARGET_TYPE')
+ cflags = getattr(self, 'samba_cflags', [])[:]
+
+ if modules == []:
+ sname = sname.replace('-','_')
+ sname = sname.replace('/','_')
+ cflags.append('-DSTATIC_%s_MODULES=%s' % (sname, sentinel))
+ if sentinel == 'NULL':
+ proto = "extern void __%s_dummy_module_proto(void)" % (sname)
+ cflags.append('-DSTATIC_%s_MODULES_PROTO=%s' % (sname, proto))
+ self.ccflags = cflags
+ return
+
+ for m in modules:
+ bld.ASSERT(m in subsystems,
+ "No init_function defined for module '%s' in target '%s'" % (m, self.sname))
+ init_fn_list = []
+ for d in subsystems[m]:
+ if targets[d['TARGET']] != 'DISABLED':
+ init_fn_list.append(d['INIT_FUNCTION'])
+ if init_fn_list == []:
+ cflags.append('-DSTATIC_%s_MODULES=%s' % (m, sentinel))
+ if sentinel == 'NULL':
+ proto = "extern void __%s_dummy_module_proto(void)" % (m)
+ cflags.append('-DSTATIC_%s_MODULES_PROTO=%s' % (m, proto))
+ else:
+ cflags.append('-DSTATIC_%s_MODULES=%s' % (m, ','.join(init_fn_list) + ',' + sentinel))
+ proto=''
+ for f in init_fn_list:
+ proto += '_MODULE_PROTO(%s)' % f
+ proto += "extern void __%s_dummy_module_proto(void)" % (m)
+ cflags.append('-DSTATIC_%s_MODULES_PROTO=%s' % (m, proto))
+ self.ccflags = cflags
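+# The generated flags take the shape (names are illustrative):
+#   -DSTATIC_<name>_MODULES=<init_fn1>,<init_fn2>,NULL
+#   -DSTATIC_<name>_MODULES_PROTO=_MODULE_PROTO(<init_fn1>)...
+# so the C code can statically enumerate its linked-in modules.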
+
+
+def check_duplicate_sources(bld, tgt_list):
+ '''see if we are compiling the same source file more than once
+ without an allow_duplicates attribute'''
+
+ debug('deps: checking for duplicate sources')
+
+ targets = LOCAL_CACHE(bld, 'TARGET_TYPE')
+ ret = True
+
+ global tstart
+
+ for t in tgt_list:
+ source_list = TO_LIST(getattr(t, 'source', ''))
+ tpath = os.path.normpath(os_path_relpath(t.path.abspath(bld.env), t.env.BUILD_DIRECTORY + '/default'))
+ obj_sources = set()
+ for s in source_list:
+ p = os.path.normpath(os.path.join(tpath, s))
+ if p in obj_sources:
+ Logs.error("ERROR: source %s appears twice in target '%s'" % (p, t.sname))
+ sys.exit(1)
+ obj_sources.add(p)
+ t.samba_source_set = obj_sources
+
+ subsystems = {}
+
+ # build a list of targets that each source file is part of
+ for t in tgt_list:
+ sources = []
+ if not targets[t.sname] in [ 'LIBRARY', 'BINARY', 'PYTHON' ]:
+ continue
+ for obj in t.add_objects:
+ t2 = t.bld.name_to_obj(obj, bld.env)
+ source_set = getattr(t2, 'samba_source_set', set())
+ for s in source_set:
+ if not s in subsystems:
+ subsystems[s] = {}
+ if not t.sname in subsystems[s]:
+ subsystems[s][t.sname] = []
+ subsystems[s][t.sname].append(t2.sname)
+
+ for s in subsystems:
+ if len(subsystems[s]) > 1 and Options.options.SHOW_DUPLICATES:
+ Logs.warn("WARNING: source %s is in more than one target: %s" % (s, subsystems[s].keys()))
+ for tname in subsystems[s]:
+ if len(subsystems[s][tname]) > 1:
+ raise Utils.WafError("ERROR: source %s is in more than one subsystem of target '%s': %s" % (s, tname, subsystems[s][tname]))
+
+ return ret
+
+
+def check_orphaned_targets(bld, tgt_list):
+ '''check if any build targets are orphaned'''
+
+ target_dict = LOCAL_CACHE(bld, 'TARGET_TYPE')
+
+ debug('deps: checking for orphaned targets')
+
+ for t in tgt_list:
+ if getattr(t, 'samba_used', False):
+ continue
+ type = target_dict[t.sname]
+ if not type in ['BINARY', 'LIBRARY', 'MODULE', 'ET', 'PYTHON']:
+ if re.search('^PIDL_', t.sname) is None:
+ Logs.warn("Target %s of type %s is unused by any other target" % (t.sname, type))
+
+
+def check_group_ordering(bld, tgt_list):
+ '''see if we have any dependencies that violate the group ordering
+
+ It is an error for a target to depend on a target from a later
+ build group
+ '''
+
+ def group_name(g):
+ tm = bld.task_manager
+ return [x for x in tm.groups_names if id(tm.groups_names[x]) == id(g)][0]
+
+ for g in bld.task_manager.groups:
+ gname = group_name(g)
+ for t in g.tasks_gen:
+ t.samba_group = gname
+
+ grp_map = {}
+ idx = 0
+ for g in bld.task_manager.groups:
+ name = group_name(g)
+ grp_map[name] = idx
+ idx += 1
+
+ targets = LOCAL_CACHE(bld, 'TARGET_TYPE')
+
+ ret = True
+ for t in tgt_list:
+ tdeps = getattr(t, 'add_objects', []) + getattr(t, 'uselib_local', [])
+ for d in tdeps:
+ t2 = bld.name_to_obj(d, bld.env)
+ if t2 is None:
+ continue
+ map1 = grp_map[t.samba_group]
+ map2 = grp_map[t2.samba_group]
+
+ if map2 > map1:
+ Logs.error("Target %r in build group %r depends on target %r from later build group %r" % (
+ t.sname, t.samba_group, t2.sname, t2.samba_group))
+ ret = False
+
+ return ret
+
+
+def show_final_deps(bld, tgt_list):
+ '''show the final dependencies for all targets'''
+
+ targets = LOCAL_CACHE(bld, 'TARGET_TYPE')
+
+ for t in tgt_list:
+ if not targets[t.sname] in ['LIBRARY', 'BINARY', 'PYTHON', 'SUBSYSTEM']:
+ continue
+ debug('deps: final dependencies for target %s: uselib=%s uselib_local=%s add_objects=%s',
+ t.sname, t.uselib, getattr(t, 'uselib_local', []), getattr(t, 'add_objects', []))
+
+
+def add_samba_attributes(bld, tgt_list):
+ '''ensure a target has the required samba attributes'''
+
+ targets = LOCAL_CACHE(bld, 'TARGET_TYPE')
+
+ for t in tgt_list:
+ if t.name != '':
+ t.sname = t.name
+ else:
+ t.sname = t.target
+ t.samba_type = targets[t.sname]
+ t.samba_abspath = t.path.abspath(bld.env)
+ t.samba_deps_extended = t.samba_deps[:]
+ t.samba_includes_extended = TO_LIST(t.samba_includes)[:]
+ t.ccflags = getattr(t, 'samba_cflags', '')
+
+def replace_grouping_libraries(bld, tgt_list):
+ '''replace dependencies based on grouping libraries
+
+ If a library is marked as a grouping library, then any target that
+ depends on a subsystem that is part of that grouping library gets
+ that dependency replaced with a dependency on the grouping library
+ '''
+
+ targets = LOCAL_CACHE(bld, 'TARGET_TYPE')
+
+ grouping = {}
+
+ # find our list of grouping libraries, mapped from the subsystems they depend on
+ for t in tgt_list:
+ if not getattr(t, 'grouping_library', False):
+ continue
+ for dep in t.samba_deps_extended:
+ bld.ASSERT(dep in targets, "grouping library target %s not declared in %s" % (dep, t.sname))
+ if targets[dep] == 'SUBSYSTEM':
+ grouping[dep] = t.sname
+
+ # now replace any dependencies on elements of grouping libraries
+ for t in tgt_list:
+ for i in range(len(t.samba_deps_extended)):
+ dep = t.samba_deps_extended[i]
+ if dep in grouping:
+ if t.sname != grouping[dep]:
+ debug("deps: target %s: replacing dependency %s with grouping library %s" % (t.sname, dep, grouping[dep]))
+ t.samba_deps_extended[i] = grouping[dep]
+
+
+
+def build_direct_deps(bld, tgt_list):
+ '''build the direct_objects and direct_libs sets for each target'''
+
+ targets = LOCAL_CACHE(bld, 'TARGET_TYPE')
+ syslib_deps = LOCAL_CACHE(bld, 'SYSLIB_DEPS')
+
+ global_deps = bld.env.GLOBAL_DEPENDENCIES
+ global_deps_exclude = set()
+ for dep in global_deps:
+ t = bld.name_to_obj(dep, bld.env)
+ for d in t.samba_deps:
+ # prevent loops from the global dependencies list
+ global_deps_exclude.add(d)
+ global_deps_exclude.add(d + '.objlist')
+
+ for t in tgt_list:
+ t.direct_objects = set()
+ t.direct_libs = set()
+ t.direct_syslibs = set()
+ deps = t.samba_deps_extended[:]
+ if getattr(t, 'samba_use_global_deps', False) and not t.sname in global_deps_exclude:
+ deps.extend(global_deps)
+ for d in deps:
+ if d == t.sname: continue
+ if not d in targets:
+ Logs.error("Unknown dependency '%s' in '%s'" % (d, t.sname))
+ sys.exit(1)
+ if targets[d] in [ 'EMPTY', 'DISABLED' ]:
+ continue
+ if targets[d] == 'PYTHON' and targets[t.sname] != 'PYTHON' and t.sname.find('.objlist') == -1:
+ # this check should be more restrictive, but for now we have pidl-generated python
+ # code that directly depends on other python modules
+ Logs.error('ERROR: Target %s has dependency on python module %s' % (t.sname, d))
+ sys.exit(1)
+ if targets[d] == 'SYSLIB':
+ t.direct_syslibs.add(d)
+ if d in syslib_deps:
+ for implied in TO_LIST(syslib_deps[d]):
+ if BUILTIN_LIBRARY(bld, implied):
+ t.direct_objects.add(implied)
+ elif targets[implied] == 'SYSLIB':
+ t.direct_syslibs.add(implied)
+ elif targets[implied] in ['LIBRARY', 'MODULE']:
+ t.direct_libs.add(implied)
+ else:
+ Logs.error('Implied dependency %s in %s is of type %s' % (
+ implied, t.sname, targets[implied]))
+ sys.exit(1)
+ continue
+ t2 = bld.name_to_obj(d, bld.env)
+ if t2 is None:
+ Logs.error("no task %s of type %s in %s" % (d, targets[d], t.sname))
+ sys.exit(1)
+ if t2.samba_type in [ 'LIBRARY', 'MODULE' ]:
+ t.direct_libs.add(d)
+ elif t2.samba_type in [ 'SUBSYSTEM', 'ASN1', 'PYTHON' ]:
+ t.direct_objects.add(d)
+ debug('deps: built direct dependencies')
+
+
+def dependency_loop(loops, t, target):
+ '''add a dependency loop to the loops dictionary'''
+ if t.sname == target:
+ return
+ if not target in loops:
+ loops[target] = set()
+ if not t.sname in loops[target]:
+ loops[target].add(t.sname)
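+# loops maps a target name to the set of targets found to depend on it
+# circularly, e.g. (hypothetical names):
+#   loops = {'libA': set(['libB', 'subsysC'])}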
+
+
+def indirect_libs(bld, t, chain, loops):
+ '''recursively calculate the indirect library dependencies for a target
+
+ An indirect library is a library that results from a dependency on
+ a subsystem
+ '''
+
+ ret = getattr(t, 'indirect_libs', None)
+ if ret is not None:
+ return ret
+
+ ret = set()
+ for obj in t.direct_objects:
+ if obj in chain:
+ dependency_loop(loops, t, obj)
+ continue
+ chain.add(obj)
+ t2 = bld.name_to_obj(obj, bld.env)
+ r2 = indirect_libs(bld, t2, chain, loops)
+ chain.remove(obj)
+ ret = ret.union(t2.direct_libs)
+ ret = ret.union(r2)
+
+ for obj in indirect_objects(bld, t, set(), loops):
+ if obj in chain:
+ dependency_loop(loops, t, obj)
+ continue
+ chain.add(obj)
+ t2 = bld.name_to_obj(obj, bld.env)
+ r2 = indirect_libs(bld, t2, chain, loops)
+ chain.remove(obj)
+ ret = ret.union(t2.direct_libs)
+ ret = ret.union(r2)
+
+ t.indirect_libs = ret
+
+ return ret
+
+
+def indirect_objects(bld, t, chain, loops):
+ '''recursively calculate the indirect object dependencies for a target
+
+ indirect objects are the set of objects from expanding the
+ subsystem dependencies
+ '''
+
+ ret = getattr(t, 'indirect_objects', None)
+ if ret is not None: return ret
+
+ ret = set()
+ for lib in t.direct_objects:
+ if lib in chain:
+ dependency_loop(loops, t, lib)
+ continue
+ chain.add(lib)
+ t2 = bld.name_to_obj(lib, bld.env)
+ r2 = indirect_objects(bld, t2, chain, loops)
+ chain.remove(lib)
+ ret = ret.union(t2.direct_objects)
+ ret = ret.union(r2)
+
+ t.indirect_objects = ret
+ return ret
+
+
+def extended_objects(bld, t, chain):
+ '''recursively calculate the extended object dependencies for a target
+
+ extended objects are the union of:
+ - direct objects
+ - indirect objects
+ - direct and indirect objects of all direct and indirect libraries
+ '''
+
+ ret = getattr(t, 'extended_objects', None)
+ if ret is not None: return ret
+
+ ret = set()
+ ret = ret.union(t.final_objects)
+
+ for lib in t.final_libs:
+ if lib in chain:
+ continue
+ t2 = bld.name_to_obj(lib, bld.env)
+ chain.add(lib)
+ r2 = extended_objects(bld, t2, chain)
+ chain.remove(lib)
+ ret = ret.union(t2.final_objects)
+ ret = ret.union(r2)
+
+ t.extended_objects = ret
+ return ret
+
+
+def includes_objects(bld, t, chain, inc_loops):
+ '''recursively calculate the includes object dependencies for a target
+
+ includes dependencies come from either library or object dependencies
+ '''
+ ret = getattr(t, 'includes_objects', None)
+ if ret is not None:
+ return ret
+
+ ret = t.direct_objects.copy()
+ ret = ret.union(t.direct_libs)
+
+ for obj in t.direct_objects:
+ if obj in chain:
+ dependency_loop(inc_loops, t, obj)
+ continue
+ chain.add(obj)
+ t2 = bld.name_to_obj(obj, bld.env)
+ r2 = includes_objects(bld, t2, chain, inc_loops)
+ chain.remove(obj)
+ ret = ret.union(t2.direct_objects)
+ ret = ret.union(r2)
+
+ for lib in t.direct_libs:
+ if lib in chain:
+ dependency_loop(inc_loops, t, lib)
+ continue
+ chain.add(lib)
+ t2 = bld.name_to_obj(lib, bld.env)
+ if t2 is None:
+ targets = LOCAL_CACHE(bld, 'TARGET_TYPE')
+ Logs.error('Target %s of type %s not found in direct_libs for %s' % (
+ lib, targets[lib], t.sname))
+ sys.exit(1)
+ r2 = includes_objects(bld, t2, chain, inc_loops)
+ chain.remove(lib)
+ ret = ret.union(t2.direct_objects)
+ ret = ret.union(r2)
+
+ t.includes_objects = ret
+ return ret
+
+
+def break_dependency_loops(bld, tgt_list):
+ '''find and break dependency loops'''
+ loops = {}
+ inc_loops = {}
+
+ # build up the list of loops
+ for t in tgt_list:
+ indirect_objects(bld, t, set(), loops)
+ indirect_libs(bld, t, set(), loops)
+ includes_objects(bld, t, set(), inc_loops)
+
+ # break the loops
+ for t in tgt_list:
+ if t.sname in loops:
+ for attr in ['direct_objects', 'indirect_objects', 'direct_libs', 'indirect_libs']:
+ objs = getattr(t, attr, set())
+ setattr(t, attr, objs.difference(loops[t.sname]))
+
+ for loop in loops:
+ debug('deps: Found dependency loops for target %s : %s', loop, loops[loop])
+
+ for loop in inc_loops:
+ debug('deps: Found include loops for target %s : %s', loop, inc_loops[loop])
+
+ # expand the loops mapping by one level
+ for loop in loops.copy():
+ for tgt in loops[loop]:
+ if tgt in loops:
+ loops[loop] = loops[loop].union(loops[tgt])
+
+ for loop in inc_loops.copy():
+ for tgt in inc_loops[loop]:
+ if tgt in inc_loops:
+ inc_loops[loop] = inc_loops[loop].union(inc_loops[tgt])
+
+
+ # expand indirect subsystem and library loops
+ for loop in loops.copy():
+ t = bld.name_to_obj(loop, bld.env)
+ if t.samba_type in ['SUBSYSTEM']:
+ loops[loop] = loops[loop].union(t.indirect_objects)
+ loops[loop] = loops[loop].union(t.direct_objects)
+ if t.samba_type in ['LIBRARY','PYTHON']:
+ loops[loop] = loops[loop].union(t.indirect_libs)
+ loops[loop] = loops[loop].union(t.direct_libs)
+ if loop in loops[loop]:
+ loops[loop].remove(loop)
+
+ # expand indirect includes loops
+ for loop in inc_loops.copy():
+ t = bld.name_to_obj(loop, bld.env)
+ inc_loops[loop] = inc_loops[loop].union(t.includes_objects)
+ if loop in inc_loops[loop]:
+ inc_loops[loop].remove(loop)
+
+ # add in the replacement dependencies
+ for t in tgt_list:
+ for loop in loops:
+ for attr in ['indirect_objects', 'indirect_libs']:
+ objs = getattr(t, attr, set())
+ if loop in objs:
+ diff = loops[loop].difference(objs)
+ if t.sname in diff:
+ diff.remove(t.sname)
+ if diff:
+ debug('deps: Expanded target %s of type %s from loop %s by %s', t.sname, t.samba_type, loop, diff)
+ objs = objs.union(diff)
+ setattr(t, attr, objs)
+
+ for loop in inc_loops:
+ objs = getattr(t, 'includes_objects', set())
+ if loop in objs:
+ diff = inc_loops[loop].difference(objs)
+ if t.sname in diff:
+ diff.remove(t.sname)
+ if diff:
+ debug('deps: Expanded target %s includes of type %s from loop %s by %s', t.sname, t.samba_type, loop, diff)
+ objs = objs.union(diff)
+ setattr(t, 'includes_objects', objs)
+
+
+def reduce_objects(bld, tgt_list):
+ '''reduce objects by looking for indirect object dependencies'''
+ rely_on = {}
+
+ for t in tgt_list:
+ t.extended_objects = None
+
+ changed = False
+
+ for type in ['BINARY', 'PYTHON', 'LIBRARY']:
+ for t in tgt_list:
+ if t.samba_type != type: continue
+ # if we will indirectly link to a target then we don't need it
+ new = t.final_objects.copy()
+ for l in t.final_libs:
+ t2 = bld.name_to_obj(l, bld.env)
+ t2_obj = extended_objects(bld, t2, set())
+ dup = new.intersection(t2_obj)
+ if t.sname in rely_on:
+ dup = dup.difference(rely_on[t.sname])
+ if dup:
+ debug('deps: removing dups from %s of type %s: %s also in %s %s',
+ t.sname, t.samba_type, dup, t2.samba_type, l)
+ new = new.difference(dup)
+ changed = True
+ if not l in rely_on:
+ rely_on[l] = set()
+ rely_on[l] = rely_on[l].union(dup)
+ t.final_objects = new
+
+ if not changed:
+ return False
+
+ # add back in any objects that were relied upon by the reduction rules
+ for r in rely_on:
+ t = bld.name_to_obj(r, bld.env)
+ t.final_objects = t.final_objects.union(rely_on[r])
+
+ return True
+
+
+def show_library_loop(bld, lib1, lib2, path, seen):
+ '''show the detailed path of a library loop between lib1 and lib2'''
+
+ t = bld.name_to_obj(lib1, bld.env)
+ if not lib2 in getattr(t, 'final_libs', set()):
+ return
+
+ for d in t.samba_deps_extended:
+ if d in seen:
+ continue
+ seen.add(d)
+ path2 = path + '=>' + d
+ if d == lib2:
+ Logs.warn('library loop path: ' + path2)
+ return
+ show_library_loop(bld, d, lib2, path2, seen)
+ seen.remove(d)
+
+
+def calculate_final_deps(bld, tgt_list, loops):
+ '''calculate the final library and object dependencies'''
+ for t in tgt_list:
+ # start with the maximum possible list
+ t.final_libs = t.direct_libs.union(indirect_libs(bld, t, set(), loops))
+ t.final_objects = t.direct_objects.union(indirect_objects(bld, t, set(), loops))
+
+ for t in tgt_list:
+ # don't depend on ourselves
+ if t.sname in t.final_libs:
+ t.final_libs.remove(t.sname)
+ if t.sname in t.final_objects:
+ t.final_objects.remove(t.sname)
+
+ # handle any non-shared binaries
+ for t in tgt_list:
+ if t.samba_type == 'BINARY' and bld.NONSHARED_BINARY(t.sname):
+ subsystem_list = LOCAL_CACHE(bld, 'INIT_FUNCTIONS')
+ targets = LOCAL_CACHE(bld, 'TARGET_TYPE')
+
+ # replace lib deps with objlist deps
+ for l in t.final_libs:
+ objname = l + '.objlist'
+ t2 = bld.name_to_obj(objname, bld.env)
+ if t2 is None:
+ Logs.error('ERROR: subsystem %s not found' % objname)
+ sys.exit(1)
+ t.final_objects.add(objname)
+ t.final_objects = t.final_objects.union(extended_objects(bld, t2, set()))
+ if l in subsystem_list:
+ # it's a subsystem - we also need the contents of any modules
+ for d in subsystem_list[l]:
+ module_name = d['TARGET']
+ if targets[module_name] == 'LIBRARY':
+ objname = module_name + '.objlist'
+ elif targets[module_name] == 'SUBSYSTEM':
+ objname = module_name
+ else:
+ continue
+ t2 = bld.name_to_obj(objname, bld.env)
+ if t2 is None:
+ Logs.error('ERROR: subsystem %s not found' % objname)
+ sys.exit(1)
+ t.final_objects.add(objname)
+ t.final_objects = t.final_objects.union(extended_objects(bld, t2, set()))
+ t.final_libs = set()
+
+ # find any library loops
+ for t in tgt_list:
+ if t.samba_type in ['LIBRARY', 'PYTHON']:
+ for l in t.final_libs.copy():
+ t2 = bld.name_to_obj(l, bld.env)
+ if t.sname in t2.final_libs:
+ if getattr(bld.env, "ALLOW_CIRCULAR_LIB_DEPENDENCIES", False):
+ # we could break this in either direction. If one of the libraries
+ # has a version number, and will be distributed publicly, then
+ # we should make it the lower level library in the DAG
+ Logs.warn('deps: removing library loop %s from %s' % (t.sname, t2.sname))
+ dependency_loop(loops, t, t2.sname)
+ t2.final_libs.remove(t.sname)
+ else:
+ Logs.error('ERROR: circular library dependency between %s and %s'
+ % (t.sname, t2.sname))
+ show_library_loop(bld, t.sname, t2.sname, t.sname, set())
+ show_library_loop(bld, t2.sname, t.sname, t2.sname, set())
+ sys.exit(1)
+
+ for loop in loops:
+ debug('deps: Found dependency loops for target %s : %s', loop, loops[loop])
+
+ # we now need to make corrections for any library loops we broke up
+ # any target that depended on the target of the loop and doesn't
+ # depend on the source of the loop needs to get the loop source added
+ for type in ['BINARY','PYTHON','LIBRARY','BINARY']:
+ for t in tgt_list:
+ if t.samba_type != type: continue
+ for loop in loops:
+ if loop in t.final_libs:
+ diff = loops[loop].difference(t.final_libs)
+ if t.sname in diff:
+ diff.remove(t.sname)
+ # make sure we don't recreate the loop again!
+ for d in diff.copy():
+ t2 = bld.name_to_obj(d, bld.env)
+ if t2.samba_type == 'LIBRARY':
+ if t.sname in t2.final_libs:
+ debug('deps: removing expansion %s from %s', d, t.sname)
+ diff.remove(d)
+ if diff:
+ debug('deps: Expanded target %s by loop %s libraries (loop %s) %s', t.sname, loop,
+ loops[loop], diff)
+ t.final_libs = t.final_libs.union(diff)
+
+ # remove objects that are also available in linked libs
+ count = 0
+ while reduce_objects(bld, tgt_list):
+ count += 1
+ if count > 100:
+ Logs.warn("WARNING: Unable to remove all inter-target object duplicates")
+ break
+ debug('deps: Object reduction took %u iterations', count)
+
+ # add in any syslib dependencies
+ for t in tgt_list:
+ if not t.samba_type in ['BINARY','PYTHON','LIBRARY','SUBSYSTEM']:
+ continue
+ syslibs = set()
+ for d in t.final_objects:
+ t2 = bld.name_to_obj(d, bld.env)
+ syslibs = syslibs.union(t2.direct_syslibs)
+ # this adds the indirect syslibs as well, which may not be needed
+ # depending on the linker flags
+ for d in t.final_libs:
+ t2 = bld.name_to_obj(d, bld.env)
+ syslibs = syslibs.union(t2.direct_syslibs)
+ t.final_syslibs = syslibs
+
+
+ # find any unresolved library loops
+ lib_loop_error = False
+ for t in tgt_list:
+ if t.samba_type in ['LIBRARY', 'PYTHON']:
+ for l in t.final_libs.copy():
+ t2 = bld.name_to_obj(l, bld.env)
+ if t.sname in t2.final_libs:
+ Logs.error('ERROR: Unresolved library loop %s from %s' % (t.sname, t2.sname))
+ lib_loop_error = True
+ if lib_loop_error:
+ sys.exit(1)
+
+ debug('deps: removed duplicate dependencies')
+
+
+def show_dependencies(bld, target, seen):
+ '''recursively show the dependencies of target'''
+
+ if target in seen:
+ return
+
+ t = bld.name_to_obj(target, bld.env)
+ if t is None:
+ Logs.error("ERROR: Unable to find target '%s'" % target)
+ sys.exit(1)
+
+ Logs.info('%s(OBJECTS): %s' % (target, t.direct_objects))
+ Logs.info('%s(LIBS): %s' % (target, t.direct_libs))
+ Logs.info('%s(SYSLIBS): %s' % (target, t.direct_syslibs))
+
+ seen.add(target)
+
+ for t2 in t.direct_objects:
+ show_dependencies(bld, t2, seen)
+
+
+def show_object_duplicates(bld, tgt_list):
+ '''show a list of object files that are included in more than
+ one library or binary'''
+
+ targets = LOCAL_CACHE(bld, 'TARGET_TYPE')
+
+ used_by = {}
+
+ Logs.info("showing duplicate objects")
+
+ for t in tgt_list:
+ if not targets[t.sname] in [ 'LIBRARY', 'PYTHON' ]:
+ continue
+ for n in getattr(t, 'final_objects', set()):
+ t2 = bld.name_to_obj(n, bld.env)
+ if not n in used_by:
+ used_by[n] = set()
+ used_by[n].add(t.sname)
+
+ for n in used_by:
+ if len(used_by[n]) > 1:
+ Logs.info("target '%s' is used by %s" % (n, used_by[n]))
+
+ Logs.info("showing indirect dependency counts (sorted by count)")
+
+ def indirect_count(t1, t2):
+ return len(t2.indirect_objects) - len(t1.indirect_objects)
+
+ sorted_list = sorted(tgt_list, cmp=indirect_count)
+ for t in sorted_list:
+ if len(t.indirect_objects) > 1:
+ Logs.info("%s depends on %u indirect objects" % (t.sname, len(t.indirect_objects)))
+
+
+######################################################################
+# this provides a way to save our dependency calculations between runs
+savedeps_version = 3
+savedeps_inputs = ['samba_deps', 'samba_includes', 'local_include', 'local_include_first', 'samba_cflags',
+ 'source', 'grouping_library', 'samba_ldflags', 'allow_undefined_symbols',
+ 'use_global_deps', 'global_include' ]
+savedeps_outputs = ['uselib', 'uselib_local', 'add_objects', 'includes', 'ccflags', 'ldflags', 'samba_deps_extended']
+savedeps_outenv = ['INC_PATHS']
+savedeps_envvars = ['NONSHARED_BINARIES', 'GLOBAL_DEPENDENCIES', 'EXTRA_CFLAGS', 'EXTRA_LDFLAGS', 'EXTRA_INCLUDES' ]
+savedeps_caches = ['GLOBAL_DEPENDENCIES', 'TARGET_TYPE', 'INIT_FUNCTIONS', 'SYSLIB_DEPS']
+savedeps_files = ['buildtools/wafsamba/samba_deps.py']
+
+def save_samba_deps(bld, tgt_list):
+ '''save the dependency calculations between builds, to make
+ further builds faster'''
+ denv = Environment.Environment()
+
+ denv.version = savedeps_version
+ denv.savedeps_inputs = savedeps_inputs
+ denv.savedeps_outputs = savedeps_outputs
+ denv.input = {}
+ denv.output = {}
+ denv.outenv = {}
+ denv.caches = {}
+ denv.envvar = {}
+ denv.files = {}
+
+ for f in savedeps_files:
+ denv.files[f] = os.stat(os.path.join(bld.srcnode.abspath(), f)).st_mtime
+
+ for c in savedeps_caches:
+ denv.caches[c] = LOCAL_CACHE(bld, c)
+
+ for e in savedeps_envvars:
+ denv.envvar[e] = bld.env[e]
+
+ for t in tgt_list:
+ # save all the input attributes for each target
+ tdeps = {}
+ for attr in savedeps_inputs:
+ v = getattr(t, attr, None)
+ if v is not None:
+ tdeps[attr] = v
+ if tdeps != {}:
+ denv.input[t.sname] = tdeps
+
+ # save all the output attributes for each target
+ tdeps = {}
+ for attr in savedeps_outputs:
+ v = getattr(t, attr, None)
+ if v is not None:
+ tdeps[attr] = v
+ if tdeps != {}:
+ denv.output[t.sname] = tdeps
+
+ tdeps = {}
+ for attr in savedeps_outenv:
+ if attr in t.env:
+ tdeps[attr] = t.env[attr]
+ if tdeps != {}:
+ denv.outenv[t.sname] = tdeps
+
+ depsfile = os.path.join(bld.bdir, "sambadeps")
+ denv.store_fast(depsfile)
+
+
+
+def load_samba_deps(bld, tgt_list):
+ '''load a previous set of build dependencies if possible'''
+ depsfile = os.path.join(bld.bdir, "sambadeps")
+ denv = Environment.Environment()
+ try:
+ debug('deps: checking saved dependencies')
+ denv.load_fast(depsfile)
+ if (denv.version != savedeps_version or
+ denv.savedeps_inputs != savedeps_inputs or
+ denv.savedeps_outputs != savedeps_outputs):
+ return False
+ except Exception:
+ return False
+
+ # check if critical files have changed
+ for f in savedeps_files:
+ if f not in denv.files:
+ return False
+ if denv.files[f] != os.stat(os.path.join(bld.srcnode.abspath(), f)).st_mtime:
+ return False
+
+ # check if caches are the same
+ for c in savedeps_caches:
+ if c not in denv.caches or denv.caches[c] != LOCAL_CACHE(bld, c):
+ return False
+
+ # check if env vars are the same
+ for e in savedeps_envvars:
+ if e not in denv.envvar or denv.envvar[e] != bld.env[e]:
+ return False
+
+ # check inputs are the same
+ for t in tgt_list:
+ tdeps = {}
+ for attr in savedeps_inputs:
+ v = getattr(t, attr, None)
+ if v is not None:
+ tdeps[attr] = v
+ if t.sname in denv.input:
+ olddeps = denv.input[t.sname]
+ else:
+ olddeps = {}
+ if tdeps != olddeps:
+ #print '%s: \ntdeps=%s \nodeps=%s' % (t.sname, tdeps, olddeps)
+ return False
+
+ # put outputs in place
+ for t in tgt_list:
+ if not t.sname in denv.output: continue
+ tdeps = denv.output[t.sname]
+ for a in tdeps:
+ setattr(t, a, tdeps[a])
+
+ # put output env vars in place
+ for t in tgt_list:
+ if not t.sname in denv.outenv: continue
+ tdeps = denv.outenv[t.sname]
+ for a in tdeps:
+ t.env[a] = tdeps[a]
+
+ debug('deps: loaded saved dependencies')
+ return True
+
+
+
+def check_project_rules(bld):
+ '''check the project rules - ensuring the targets are sane'''
+
+ loops = {}
+ inc_loops = {}
+
+ tgt_list = get_tgt_list(bld)
+
+ add_samba_attributes(bld, tgt_list)
+
+ force_project_rules = (Options.options.SHOWDEPS or
+ Options.options.SHOW_DUPLICATES)
+
+ if not force_project_rules and load_samba_deps(bld, tgt_list):
+ return
+
+ global tstart
+ tstart = time.clock()
+
+ bld.new_rules = True
+ Logs.info("Checking project rules ...")
+
+ debug('deps: project rules checking started')
+
+ expand_subsystem_deps(bld)
+
+ debug("deps: expand_subsystem_deps: %f" % (time.clock() - tstart))
+
+ replace_grouping_libraries(bld, tgt_list)
+
+ debug("deps: replace_grouping_libraries: %f" % (time.clock() - tstart))
+
+ build_direct_deps(bld, tgt_list)
+
+ debug("deps: build_direct_deps: %f" % (time.clock() - tstart))
+
+ break_dependency_loops(bld, tgt_list)
+
+ debug("deps: break_dependency_loops: %f" % (time.clock() - tstart))
+
+ if Options.options.SHOWDEPS:
+ show_dependencies(bld, Options.options.SHOWDEPS, set())
+
+ calculate_final_deps(bld, tgt_list, loops)
+
+ debug("deps: calculate_final_deps: %f" % (time.clock() - tstart))
+
+ if Options.options.SHOW_DUPLICATES:
+ show_object_duplicates(bld, tgt_list)
+
+ # run the various attribute generators
+ for f in [ build_dependencies, build_includes, add_init_functions ]:
+ debug('deps: project rules checking %s', f)
+ for t in tgt_list: f(t)
+ debug("deps: %s: %f" % (f, time.clock() - tstart))
+
+ debug('deps: project rules stage1 completed')
+
+ #check_orphaned_targets(bld, tgt_list)
+
+ if not check_duplicate_sources(bld, tgt_list):
+ Logs.error("Duplicate sources present - aborting")
+ sys.exit(1)
+
+ debug("deps: check_duplicate_sources: %f" % (time.clock() - tstart))
+
+ if not check_group_ordering(bld, tgt_list):
+ Logs.error("Bad group ordering - aborting")
+ sys.exit(1)
+
+ debug("deps: check_group_ordering: %f" % (time.clock() - tstart))
+
+ show_final_deps(bld, tgt_list)
+
+ debug("deps: show_final_deps: %f" % (time.clock() - tstart))
+
+ debug('deps: project rules checking completed - %u targets checked',
+ len(tgt_list))
+
+ if not bld.is_install:
+ save_samba_deps(bld, tgt_list)
+
+ debug("deps: save_samba_deps: %f" % (time.clock() - tstart))
+
+ Logs.info("Project rules pass")
+
+
+def CHECK_PROJECT_RULES(bld):
+ '''enable checking of project targets for sanity'''
+ if bld.env.added_project_rules:
+ return
+ bld.env.added_project_rules = True
+ bld.add_pre_fun(check_project_rules)
+Build.BuildContext.CHECK_PROJECT_RULES = CHECK_PROJECT_RULES
+
+
diff --git a/buildtools/wafsamba/samba_dist.py b/buildtools/wafsamba/samba_dist.py
new file mode 100644
index 0000000..aecacb7
--- /dev/null
+++ b/buildtools/wafsamba/samba_dist.py
@@ -0,0 +1,251 @@
+# customised version of 'waf dist' for Samba tools
+# uses git ls-files to get file lists
+
+import Utils, os, sys, tarfile, stat, Scripting, Logs, Options
+from samba_utils import *
+
+dist_dirs = None
+dist_files = None
+dist_blacklist = ""
+
+def add_symlink(tar, fname, abspath, basedir):
+ '''handle symlinks to directories that may move during packaging'''
+ if not os.path.islink(abspath):
+ return False
+ tinfo = tar.gettarinfo(name=abspath, arcname=fname)
+ tgt = os.readlink(abspath)
+
+ if dist_dirs:
+ # we need to find the target relative to the main directory
+ # this is here to cope with symlinks into the buildtools
+ # directory from within the standalone libraries in Samba. For example,
+ # a symlink to ../../buildtools/scripts/autogen-waf.sh needs
+ # to be rewritten as a symlink to buildtools/scripts/autogen-waf.sh
+ # when the tarball for talloc is built
+
+ # the filename without the appname-version
+ rel_fname = '/'.join(fname.split('/')[1:])
+
+ # join this with the symlink target
+ tgt_full = os.path.join(os.path.dirname(rel_fname), tgt)
+
+ # join with the base directory
+ tgt_base = os.path.normpath(os.path.join(basedir, tgt_full))
+
+ # see if this is inside one of our dist_dirs
+ for dir in dist_dirs.split():
+ if dir.find(':') != -1:
+ destdir=dir.split(':')[1]
+ dir=dir.split(':')[0]
+ else:
+ destdir = '.'
+ if dir == basedir:
+ # internal links don't get rewritten
+ continue
+ if dir == tgt_base[0:len(dir)] and tgt_base[len(dir)] == '/':
+ new_tgt = destdir + tgt_base[len(dir):]
+ tinfo.linkname = new_tgt
+ break
+
+ tinfo.uid = 0
+ tinfo.gid = 0
+ tinfo.uname = 'root'
+ tinfo.gname = 'root'
+ tar.addfile(tinfo)
+ return True
+
+def add_tarfile(tar, fname, abspath, basedir):
+ '''add a file to the tarball'''
+ if add_symlink(tar, fname, abspath, basedir):
+ return
+ try:
+ tinfo = tar.gettarinfo(name=abspath, arcname=fname)
+ except OSError:
+ Logs.error('Unable to find file %s - missing from git checkout?' % abspath)
+ sys.exit(1)
+ tinfo.uid = 0
+ tinfo.gid = 0
+ tinfo.uname = 'root'
+ tinfo.gname = 'root'
+ fh = open(abspath)
+ tar.addfile(tinfo, fileobj=fh)
+ fh.close()
+
+
+def vcs_dir_contents(path):
+ """Return the versioned files under a path.
+
+ :return: List of paths relative to path
+ """
+ repo = path
+ while repo != "/":
+ if os.path.isdir(os.path.join(repo, ".git")):
+ ls_files_cmd = [ 'git', 'ls-files', '--full-name',
+ os_path_relpath(path, repo) ]
+ cwd = None
+ env = dict(os.environ)
+ env["GIT_DIR"] = os.path.join(repo, ".git")
+ break
+ elif os.path.isdir(os.path.join(repo, ".bzr")):
+ ls_files_cmd = [ 'bzr', 'ls', '--recursive', '--versioned',
+ os_path_relpath(path, repo)]
+ cwd = repo
+ env = None
+ break
+ repo = os.path.dirname(repo)
+ if repo == "/":
+ raise Exception("unsupported or no vcs for %s" % path)
+ return Utils.cmd_output(ls_files_cmd, cwd=cwd, env=env).split()
+
+
+def dist(appname='', version=''):
+
+ def add_files_to_tarball(tar, srcdir, srcsubdir, dstdir, dstsubdir, blacklist, files):
+ if blacklist is None:
+ blacklist = []
+ for f in files:
+ abspath = os.path.join(srcdir, f)
+
+ if srcsubdir != '.':
+ f = f[len(srcsubdir)+1:]
+
+ # Remove files in the blacklist
+ if f in blacklist:
+ continue
+ blacklisted = False
+ # Remove directories in the blacklist
+ for d in blacklist:
+ if f.startswith(d):
+ blacklisted = True
+ if blacklisted:
+ continue
+ if os.path.isdir(abspath):
+ continue
+ if dstsubdir != '.':
+ f = dstsubdir + '/' + f
+ fname = dstdir + '/' + f
+ add_tarfile(tar, fname, abspath, srcsubdir)
+
+
+ def list_directory_files(path):
+ curdir = os.getcwd()
+ os.chdir(srcdir)
+ out_files = []
+ for root, dirs, files in os.walk(path):
+ for f in files:
+ out_files.append(os.path.join(root, f))
+ os.chdir(curdir)
+ return out_files
+
+
+ if not isinstance(appname, str) or not appname:
+ # this copes with a mismatch in the calling arguments for dist()
+ appname = Utils.g_module.APPNAME
+ version = Utils.g_module.VERSION
+ if not version:
+ version = Utils.g_module.VERSION
+
+ srcdir = os.path.normpath(os.path.join(os.path.dirname(Utils.g_module.root_path), Utils.g_module.srcdir))
+
+ if not dist_dirs:
+ Logs.error('You must use samba_dist.DIST_DIRS() to set which directories to package')
+ sys.exit(1)
+
+ dist_base = '%s-%s' % (appname, version)
+
+ if Options.options.SIGN_RELEASE:
+ dist_name = '%s.tar' % (dist_base)
+ tar = tarfile.open(dist_name, 'w')
+ else:
+ dist_name = '%s.tar.gz' % (dist_base)
+ tar = tarfile.open(dist_name, 'w:gz')
+
+ blacklist = dist_blacklist.split()
+
+ for dir in dist_dirs.split():
+ if dir.find(':') != -1:
+ destdir=dir.split(':')[1]
+ dir=dir.split(':')[0]
+ else:
+ destdir = '.'
+ absdir = os.path.join(srcdir, dir)
+ try:
+ files = vcs_dir_contents(absdir)
+ except Exception, e:
+ Logs.error('unable to get contents of %s: %s' % (absdir, e))
+ sys.exit(1)
+ add_files_to_tarball(tar, srcdir, dir, dist_base, destdir, blacklist, files)
+
+ if dist_files:
+ for file in dist_files.split():
+ if file.find(':') != -1:
+ destfile = file.split(':')[1]
+ file = file.split(':')[0]
+ else:
+ destfile = file
+
+ absfile = os.path.join(srcdir, file)
+
+ if os.path.isdir(absfile):
+ destdir = destfile
+ dir = file
+ files = list_directory_files(dir)
+ add_files_to_tarball(tar, srcdir, dir, dist_base, destdir, blacklist, files)
+ else:
+ fname = dist_base + '/' + destfile
+ add_tarfile(tar, fname, absfile, destfile)
+
+ tar.close()
+
+ if Options.options.SIGN_RELEASE:
+ import gzip
+ try:
+ os.unlink(dist_name + '.asc')
+ except OSError:
+ pass
+
+ cmd = "gpg --detach-sign --armor " + dist_name
+ os.system(cmd)
+ uncompressed_tar = open(dist_name, 'rb')
+ compressed_tar = gzip.open(dist_name + '.gz', 'wb')
+ while 1:
+ buffer = uncompressed_tar.read(1048576)
+ if buffer:
+ compressed_tar.write(buffer)
+ else:
+ break
+ uncompressed_tar.close()
+ compressed_tar.close()
+ os.unlink(dist_name)
+ Logs.info('Created %s.gz %s.asc' % (dist_name, dist_name))
+ dist_name = dist_name + '.gz'
+ else:
+ Logs.info('Created %s' % dist_name)
+
+ return dist_name
+
+
+@conf
+def DIST_DIRS(dirs):
+ '''set the directories to package, relative to top srcdir'''
+ global dist_dirs
+ if not dist_dirs:
+ dist_dirs = dirs
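+# Illustrative usage from a standalone library's wscript (paths are
+# examples only):
+#   samba_dist.DIST_DIRS('lib/tdb:. lib/replace:lib/replace buildtools:buildtools')
+# packages lib/tdb at the root of the tarball, with lib/replace and
+# buildtools as subdirectories.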
+
+@conf
+def DIST_FILES(files, extend=False):
+ '''set additional files for packaging, relative to top srcdir'''
+ global dist_files
+ if not dist_files:
+ dist_files = files
+ elif extend:
+ dist_files = dist_files + " " + files
+
+@conf
+def DIST_BLACKLIST(blacklist):
+ '''set the files to exclude from packaging, relative to top srcdir'''
+ global dist_blacklist
+ if not dist_blacklist:
+ dist_blacklist = blacklist
+
+Scripting.dist = dist
diff --git a/buildtools/wafsamba/samba_headers.py b/buildtools/wafsamba/samba_headers.py
new file mode 100644
index 0000000..50ccad7
--- /dev/null
+++ b/buildtools/wafsamba/samba_headers.py
@@ -0,0 +1,179 @@
+# specialist handling of header files for Samba
+
+import Build, re, Task, TaskGen, shutil, sys, Logs
+from samba_utils import *
+
+
+def header_install_path(header, header_path):
+ '''find the installation path for a header, given a header_path option'''
+ if not header_path:
+ return ''
+ if not isinstance(header_path, list):
+ return header_path
+ for (p1, dir) in header_path:
+ for p2 in TO_LIST(p1):
+ if fnmatch.fnmatch(header, p2):
+ return dir
+ # default to current path
+ return ''
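+# header_path here is either a plain string or a list of
+# (pattern-list, destination) pairs, e.g. (patterns are illustrative):
+#   [ ('*.h', 'mylib'), ('internal/*.h', 'mylib/internal') ]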
+
+
+re_header = re.compile('^\s*#\s*include[ \t]*"([^"]+)"', re.I | re.M)
+
+# a dictionary mapping source header paths to public header paths
+header_map = {}
+
+def find_suggested_header(hpath):
+ '''find a suggested header path to use'''
+ base = os.path.basename(hpath)
+ ret = []
+ for h in header_map:
+ if os.path.basename(h) == base:
+ ret.append('<%s>' % header_map[h])
+ ret.append('"%s"' % h)
+ return ret
+
+def create_public_header(task):
+ '''create a public header from a private one, output within the build tree'''
+ src = task.inputs[0].abspath(task.env)
+ tgt = task.outputs[0].bldpath(task.env)
+
+ if os.path.exists(tgt):
+ os.unlink(tgt)
+
+ relsrc = os_path_relpath(src, task.env.TOPDIR)
+
+ infile = open(src, mode='r')
+ outfile = open(tgt, mode='w')
+ linenumber = 0
+
+ search_paths = [ '', task.env.RELPATH ]
+ for i in task.env.EXTRA_INCLUDES:
+ if i.startswith('#'):
+ search_paths.append(i[1:])
+
+ for line in infile:
+ linenumber += 1
+
+ # allow some straight substitutions
+ if task.env.public_headers_replace and line.strip() in task.env.public_headers_replace:
+ outfile.write(task.env.public_headers_replace[line.strip()] + '\n')
+ continue
+
+ # see if it's an include line
+ m = re_header.match(line)
+ if m is None:
+ outfile.write(line)
+ continue
+
+ # it's an include, get the header path
+ hpath = m.group(1)
+ if hpath.startswith("bin/default/"):
+ hpath = hpath[12:]
+
+ # some are always allowed
+ if task.env.public_headers_skip and hpath in task.env.public_headers_skip:
+ outfile.write(line)
+ continue
+
+ # work out the header this refers to
+ found = False
+ for s in search_paths:
+ p = os.path.normpath(os.path.join(s, hpath))
+ if p in header_map:
+ outfile.write("#include <%s>\n" % header_map[p])
+ found = True
+ break
+ if found:
+ continue
+
+ if task.env.public_headers_allow_broken:
+ Logs.warn("Broken public header include '%s' in '%s'" % (hpath, relsrc))
+ outfile.write(line)
+ continue
+
+ # try to be nice to the developer by suggesting an alternative
+ suggested = find_suggested_header(hpath)
+ outfile.close()
+ os.unlink(tgt)
+ sys.stderr.write("%s:%u:Error: unable to resolve public header %s (maybe try one of %s)\n" % (
+ os.path.relpath(src, os.getcwd()), linenumber, hpath, suggested))
+ raise Utils.WafError("Unable to resolve header path '%s' in public header '%s' in directory %s" % (
+ hpath, relsrc, task.env.RELPATH))
+ infile.close()
+ outfile.close()
+
+
+def public_headers_simple(bld, public_headers, header_path=None, public_headers_install=True):
+ '''install some headers - simple version, no munging needed
+ '''
+ if not public_headers_install:
+ return
+ for h in TO_LIST(public_headers):
+ inst_path = header_install_path(h, header_path)
+ if h.find(':') != -1:
+ s = h.split(":")
+ h_name = s[0]
+ inst_name = s[1]
+ else:
+ h_name = h
+ inst_name = os.path.basename(h)
+ bld.INSTALL_FILES('${INCLUDEDIR}', h_name, destname=inst_name)
+
+
+def PUBLIC_HEADERS(bld, public_headers, header_path=None, public_headers_install=True):
+ '''install some headers
+
+ header_path may either be a string that is added to the INCLUDEDIR,
+ or a list of (wildcard pattern, destination directory) pairs, where
+ the destination directory is relative to INCLUDEDIR
+ '''
+ bld.SET_BUILD_GROUP('final')
+
+ if not bld.env.build_public_headers:
+ # in this case no header munging is needed. Used for tdb, talloc etc.
+ public_headers_simple(bld, public_headers, header_path=header_path,
+ public_headers_install=public_headers_install)
+ return
+
+ # create the public header in the given path
+ # in the build tree
+ for h in TO_LIST(public_headers):
+ inst_path = header_install_path(h, header_path)
+ if h.find(':') != -1:
+ s = h.split(":")
+ h_name = s[0]
+ inst_name = s[1]
+ else:
+ h_name = h
+ inst_name = os.path.basename(h)
+ relpath1 = os_path_relpath(bld.srcnode.abspath(), bld.curdir)
+ relpath2 = os_path_relpath(bld.curdir, bld.srcnode.abspath())
+ targetdir = os.path.normpath(os.path.join(relpath1, bld.env.build_public_headers, inst_path))
+ if not os.path.exists(os.path.join(bld.curdir, targetdir)):
+ raise Utils.WafError("missing source directory %s for public header %s" % (targetdir, inst_name))
+ target = os.path.join(targetdir, inst_name)
+
+ # the source path of the header, relative to the top of the source tree
+ src_path = os.path.normpath(os.path.join(relpath2, h_name))
+
+ # the install path of the header, relative to the public include directory
+ target_path = os.path.normpath(os.path.join(inst_path, inst_name))
+
+ header_map[src_path] = target_path
+
+ t = bld.SAMBA_GENERATOR('HEADER_%s/%s/%s' % (relpath2, inst_path, inst_name),
+ group='headers',
+ rule=create_public_header,
+ source=h_name,
+ target=target)
+ t.env.RELPATH = relpath2
+ t.env.TOPDIR = bld.srcnode.abspath()
+ if not bld.env.public_headers_list:
+ bld.env.public_headers_list = []
+ bld.env.public_headers_list.append(os.path.join(inst_path, inst_name))
+ if public_headers_install:
+ bld.INSTALL_FILES('${INCLUDEDIR}',
+ target,
+ destname=os.path.join(inst_path, inst_name), flat=True)
+Build.BuildContext.PUBLIC_HEADERS = PUBLIC_HEADERS
diff --git a/buildtools/wafsamba/samba_install.py b/buildtools/wafsamba/samba_install.py
new file mode 100644
index 0000000..aa7f143
--- /dev/null
+++ b/buildtools/wafsamba/samba_install.py
@@ -0,0 +1,230 @@
+###########################
+# this handles the magic we need to do for installing
+# with all the configure options that affect rpath and shared
+# library use
+
+import Options
+from TaskGen import feature, before, after
+from samba_utils import *
+
+@feature('install_bin')
+@after('apply_core')
+@before('apply_link', 'apply_obj_vars')
+def install_binary(self):
+ '''install a binary, taking account of the different rpath variants'''
+ bld = self.bld
+
+ # get the ldflags we will use for install and build
+ install_ldflags = install_rpath(self)
+ build_ldflags = build_rpath(bld)
+
+ if not Options.is_install:
+ # just need to set rpath if we are not installing
+ self.env.RPATH = build_ldflags
+ return
+
+ # work out the install path, expanding variables
+ install_path = getattr(self, 'samba_inst_path', None) or '${BINDIR}'
+ install_path = bld.EXPAND_VARIABLES(install_path)
+
+ orig_target = os.path.basename(self.target)
+
+ if install_ldflags != build_ldflags:
+ # we will be creating a new target name, and using that for the
+ # install link. That stops us from overwriting the existing build
+ # target, which has different ldflags
+ self.target += '.inst'
+
+ # setup the right rpath link flags for the install
+ self.env.RPATH = install_ldflags
+
+ if not self.samba_install:
+ # this binary is marked not to be installed
+ return
+
+ # tell waf to install the right binary
+ bld.install_as(os.path.join(install_path, orig_target),
+ os.path.join(self.path.abspath(bld.env), self.target),
+ chmod=MODE_755)
+
+
+
+@feature('install_lib')
+@after('apply_core')
+@before('apply_link', 'apply_obj_vars')
+def install_library(self):
+ '''install a library, taking account of the different rpath variants'''
+ if getattr(self, 'done_install_library', False):
+ return
+
+ bld = self.bld
+
+ install_ldflags = install_rpath(self)
+ build_ldflags = build_rpath(bld)
+
+ if not Options.is_install or not getattr(self, 'samba_install', True):
+ # just need to set the build rpath if we are not installing
+ self.env.RPATH = build_ldflags
+ return
+
+ # setup the install path, expanding variables
+ install_path = getattr(self, 'samba_inst_path', None)
+ if install_path is None:
+ if getattr(self, 'private_library', False):
+ install_path = '${PRIVATELIBDIR}'
+ else:
+ install_path = '${LIBDIR}'
+ install_path = bld.EXPAND_VARIABLES(install_path)
+
+ target_name = self.target
+
+ if install_ldflags != build_ldflags:
+ # we will be creating a new target name, and using that for the
+ # install link. That stops us from overwriting the existing build
+ # target, which has different ldflags
+ self.done_install_library = True
+ t = self.clone('default')
+ t.posted = False
+ t.target += '.inst'
+ self.env.RPATH = build_ldflags
+ else:
+ t = self
+
+ t.env.RPATH = install_ldflags
+
+ dev_link = None
+
+ # in the following the names are:
+ # - inst_name is the name with .inst. in it, in the build
+ # directory
+ # - install_name is the name in the install directory
+ # - install_link is a symlink in the install directory, to install_name
+
+ if getattr(self, 'samba_realname', None):
+ install_name = self.samba_realname
+ install_link = None
+ if getattr(self, 'soname', ''):
+ install_link = self.soname
+ if getattr(self, 'samba_type', None) == 'PYTHON':
+ inst_name = bld.make_libname(t.target, nolibprefix=True, python=True)
+ else:
+ inst_name = bld.make_libname(t.target)
+ elif self.vnum:
+ vnum_base = self.vnum.split('.')[0]
+ install_name = bld.make_libname(target_name, version=self.vnum)
+ install_link = bld.make_libname(target_name, version=vnum_base)
+ inst_name = bld.make_libname(t.target)
+ if not self.private_library:
+ # only generate the dev link for non-bundled libs
+ dev_link = bld.make_libname(target_name)
+ elif getattr(self, 'soname', ''):
+ install_name = bld.make_libname(target_name)
+ install_link = self.soname
+ inst_name = bld.make_libname(t.target)
+ else:
+ install_name = bld.make_libname(target_name)
+ install_link = None
+ inst_name = bld.make_libname(t.target)
+
+ if t.env.SONAME_ST:
+ # ensure we get the right names in the library
+ if install_link:
+ t.env.append_value('LINKFLAGS', t.env.SONAME_ST % install_link)
+ else:
+ t.env.append_value('LINKFLAGS', t.env.SONAME_ST % install_name)
+ t.env.SONAME_ST = ''
+
+ # tell waf to install the library
+ bld.install_as(os.path.join(install_path, install_name),
+ os.path.join(self.path.abspath(bld.env), inst_name),
+ chmod=MODE_755)
+ if install_link and install_link != install_name:
+ # and the symlink if needed
+ bld.symlink_as(os.path.join(install_path, install_link), os.path.basename(install_name))
+ if dev_link:
+ bld.symlink_as(os.path.join(install_path, dev_link), os.path.basename(install_name))
+
+
+@feature('cshlib')
+@after('apply_implib')
+@before('apply_vnum')
+def apply_soname(self):
+ '''apply a soname to a shared library'''
+
+ if self.env.SONAME_ST and getattr(self, 'soname', ''):
+ self.env.append_value('LINKFLAGS', self.env.SONAME_ST % self.soname)
+ self.env.SONAME_ST = ''
+
+@feature('cshlib')
+@after('apply_implib')
+@before('apply_vnum')
+def apply_vscript(self):
+ '''add version-script arguments to library build'''
+
+ if self.env.HAVE_LD_VERSION_SCRIPT and getattr(self, 'version_script', ''):
+ self.env.append_value('LINKFLAGS', "-Wl,--version-script=%s" %
+ self.version_script)
+ self.version_script = None
+
+
+##############################
+# handle the creation of links for libraries and binaries in the build tree
+
+@feature('symlink_lib')
+@after('apply_link')
+def symlink_lib(self):
+ '''symlink a shared lib'''
+
+ if self.target.endswith('.inst'):
+ return
+
+ blddir = os.path.dirname(self.bld.srcnode.abspath(self.bld.env))
+ libpath = self.link_task.outputs[0].abspath(self.env)
+
+ # calculate the link target and put it in the environment
+ soext = ""
+ vnum = getattr(self, 'vnum', None)
+ if vnum is not None:
+ soext = '.' + vnum.split('.')[0]
+
+ link_target = getattr(self, 'link_name', '')
+ if link_target == '':
+ basename = os.path.basename(self.bld.make_libname(self.target, version=soext))
+ if getattr(self, "private_library", False):
+ link_target = '%s/private/%s' % (LIB_PATH, basename)
+ else:
+ link_target = '%s/%s' % (LIB_PATH, basename)
+
+ link_target = os.path.join(blddir, link_target)
+
+ if os.path.lexists(link_target):
+ if os.path.islink(link_target) and os.readlink(link_target) == libpath:
+ return
+ os.unlink(link_target)
+
+ link_container = os.path.dirname(link_target)
+ if not os.path.isdir(link_container):
+ os.makedirs(link_container)
+
+ os.symlink(libpath, link_target)
+
+
+@feature('symlink_bin')
+@after('apply_link')
+def symlink_bin(self):
+ '''symlink a binary into the build directory'''
+
+ if self.target.endswith('.inst'):
+ return
+
+ blddir = os.path.dirname(self.bld.srcnode.abspath(self.bld.env))
+ if not self.link_task.outputs or not self.link_task.outputs[0]:
+ raise Utils.WafError('no outputs found for %s in symlink_bin' % self.name)
+ binpath = self.link_task.outputs[0].abspath(self.env)
+ bldpath = os.path.join(self.bld.env.BUILD_DIRECTORY, self.link_task.outputs[0].name)
+
+ if os.path.lexists(bldpath):
+ if os.path.islink(bldpath) and os.readlink(bldpath) == binpath:
+ return
+ os.unlink(bldpath)
+ os.symlink(binpath, bldpath)
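For a versioned library the three names computed above relate as follows; a
minimal sketch of the ELF naming scheme (make_libname() also covers the
python-module and .dylib cases):

    def library_names(target, vnum):
        # e.g. target='tdb', vnum='1.3.5'
        base = 'lib%s.so' % target
        install_name = '%s.%s' % (base, vnum)                # libtdb.so.1.3.5
        install_link = '%s.%s' % (base, vnum.split('.')[0])  # libtdb.so.1 (soname)
        dev_link = base                                      # libtdb.so
        return install_name, install_link, dev_link

    print(library_names('tdb', '1.3.5'))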
diff --git a/buildtools/wafsamba/samba_optimisation.py b/buildtools/wafsamba/samba_optimisation.py
new file mode 100644
index 0000000..51d514e
--- /dev/null
+++ b/buildtools/wafsamba/samba_optimisation.py
@@ -0,0 +1,289 @@
+# This file contains waf optimisations for Samba
+
+# most of these optimisations are possible because of the restricted build environment
+# that Samba has. For example, Samba doesn't attempt to cope with Win32 paths during the
+# build, and Samba doesn't need build variants
+
+# overall this makes some build tasks quite a bit faster
+
+import os
+import Build, Utils, Node
+from TaskGen import feature, after, before
+import preproc
+
+@feature('cc', 'cxx')
+@after('apply_type_vars', 'apply_lib_vars', 'apply_core')
+def apply_incpaths(self):
+ lst = []
+
+ try:
+ kak = self.bld.kak
+ except AttributeError:
+ kak = self.bld.kak = {}
+
+ # TODO move the uselib processing out of here
+ for lib in self.to_list(self.uselib):
+ for path in self.env['CPPPATH_' + lib]:
+ if not path in lst:
+ lst.append(path)
+ if preproc.go_absolute:
+ for path in preproc.standard_includes:
+ if not path in lst:
+ lst.append(path)
+
+ for path in self.to_list(self.includes):
+ if not path in lst:
+ if preproc.go_absolute or path[0] != '/': # os.path.isabs(path):
+ lst.append(path)
+ else:
+ self.env.prepend_value('CPPPATH', path)
+
+ for path in lst:
+ node = None
+ if path[0] == '/': # os.path.isabs(path):
+ if preproc.go_absolute:
+ node = self.bld.root.find_dir(path)
+ elif path[0] == '#':
+ node = self.bld.srcnode
+ if len(path) > 1:
+ try:
+ node = kak[path]
+ except KeyError:
+ kak[path] = node = node.find_dir(path[1:])
+ else:
+ try:
+ node = kak[(self.path.id, path)]
+ except KeyError:
+ kak[(self.path.id, path)] = node = self.path.find_dir(path)
+
+ if node:
+ self.env.append_value('INC_PATHS', node)
+
+@feature('cc')
+@after('apply_incpaths')
+def apply_obj_vars_cc(self):
+ """after apply_incpaths for INC_PATHS"""
+ env = self.env
+ app = env.append_unique
+ cpppath_st = env['CPPPATH_ST']
+
+ lss = env['_CCINCFLAGS']
+
+ try:
+ cac = self.bld.cac
+ except AttributeError:
+ cac = self.bld.cac = {}
+
+ # local flags come first
+ # set the user-defined includes paths
+ for i in env['INC_PATHS']:
+
+ try:
+ lss.extend(cac[i.id])
+ except KeyError:
+
+ cac[i.id] = [cpppath_st % i.bldpath(env), cpppath_st % i.srcpath(env)]
+ lss.extend(cac[i.id])
+
+ env['_CCINCFLAGS'] = lss
+ # set the library include paths
+ for i in env['CPPPATH']:
+ app('_CCINCFLAGS', cpppath_st % i)
+
+import Node, Environment
+
+def vari(self):
+ return "default"
+Environment.Environment.variant = vari
+
+def variant(self, env):
+ if not env: return 0
+ elif self.id & 3 == Node.FILE: return 0
+ else: return "default"
+Node.Node.variant = variant
+
+
+import TaskGen, Task
+
+def create_task(self, name, src=None, tgt=None):
+ task = Task.TaskBase.classes[name](self.env, generator=self)
+ if src:
+ task.set_inputs(src)
+ if tgt:
+ task.set_outputs(tgt)
+ return task
+TaskGen.task_gen.create_task = create_task
+
+def hash_constraints(self):
+ a = self.attr
+ sum = hash((str(a('before', '')),
+ str(a('after', '')),
+ str(a('ext_in', '')),
+ str(a('ext_out', '')),
+ self.__class__.maxjobs))
+ return sum
+Task.TaskBase.hash_constraints = hash_constraints
+
+def hash_env_vars(self, env, vars_lst):
+ idx = str(id(env)) + str(vars_lst)
+ try:
+ return self.cache_sig_vars[idx]
+ except KeyError:
+ pass
+
+ m = Utils.md5()
+ m.update(''.join([str(env[a]) for a in vars_lst]))
+
+ ret = self.cache_sig_vars[idx] = m.digest()
+ return ret
+Build.BuildContext.hash_env_vars = hash_env_vars
+
+
+def store_fast(self, filename):
+ file = open(filename, 'wb')
+ data = self.get_merged_dict()
+ try:
+ Build.cPickle.dump(data, file, -1)
+ finally:
+ file.close()
+Environment.Environment.store_fast = store_fast
+
+def load_fast(self, filename):
+ file = open(filename, 'rb')
+ try:
+ data = Build.cPickle.load(file)
+ finally:
+ file.close()
+ self.table.update(data)
+Environment.Environment.load_fast = load_fast
+
+def is_this_a_static_lib(self, name):
+ try:
+ cache = self.cache_is_this_a_static_lib
+ except AttributeError:
+ cache = self.cache_is_this_a_static_lib = {}
+ try:
+ return cache[name]
+ except KeyError:
+ ret = cache[name] = 'cstaticlib' in self.bld.name_to_obj(name, self.env).features
+ return ret
+TaskGen.task_gen.is_this_a_static_lib = is_this_a_static_lib
+
+def shared_ancestors(self):
+ try:
+ cache = self.cache_is_this_a_static_lib
+ except AttributeError:
+ cache = self.cache_is_this_a_static_lib = {}
+ try:
+ return cache[id(self)]
+ except KeyError:
+
+ ret = []
+ if 'cshlib' in self.features: # or 'cprogram' in self.features:
+ if getattr(self, 'uselib_local', None):
+ lst = self.to_list(self.uselib_local)
+ ret = [x for x in lst if not self.is_this_a_static_lib(x)]
+ cache[id(self)] = ret
+ return ret
+TaskGen.task_gen.shared_ancestors = shared_ancestors
+
+@feature('cc', 'cxx')
+@after('apply_link', 'init_cc', 'init_cxx', 'apply_core')
+def apply_lib_vars(self):
+ """after apply_link because of 'link_task'
+ after default_cc because of the attribute 'uselib'"""
+
+ # after 'apply_core' in case of 'cc' when there is no link task
+
+ env = self.env
+ app = env.append_value
+ seen_libpaths = set([])
+
+ # OPTIMIZATION 1: skip uselib variables already added (700ms)
+ seen_uselib = set([])
+
+ # 1. the case of the libs defined in the project (visit ancestors first)
+ # the ancestors external libraries (uselib) will be prepended
+ self.uselib = self.to_list(self.uselib)
+ names = self.to_list(self.uselib_local)
+
+ seen = set([])
+ tmp = Utils.deque(names) # consume a copy of the list of names
+ while tmp:
+ lib_name = tmp.popleft()
+ # visit dependencies only once
+ if lib_name in seen:
+ continue
+
+ y = self.name_to_obj(lib_name)
+ if not y:
+ raise Utils.WafError('object %r was not found in uselib_local (required by %r)' % (lib_name, self.name))
+ y.post()
+ seen.add(lib_name)
+
+ # OPTIMIZATION 2: pre-compute ancestors shared libraries (100ms)
+ tmp.extend(y.shared_ancestors())
+
+ # link task and flags
+ if getattr(y, 'link_task', None):
+
+ link_name = y.target[y.target.rfind('/') + 1:]
+ if 'cstaticlib' in y.features:
+ app('STATICLIB', link_name)
+ elif 'cshlib' in y.features or 'cprogram' in y.features:
+ # WARNING some linkers can link against programs
+ app('LIB', link_name)
+
+ # the order
+ self.link_task.set_run_after(y.link_task)
+
+ # for the recompilation
+ dep_nodes = getattr(self.link_task, 'dep_nodes', [])
+ self.link_task.dep_nodes = dep_nodes + y.link_task.outputs
+
+ # OPTIMIZATION 3: reduce the amount of function calls
+ # add the link path too
+ par = y.link_task.outputs[0].parent
+ if id(par) not in seen_libpaths:
+ seen_libpaths.add(id(par))
+ tmp_path = par.bldpath(self.env)
+ if not tmp_path in env['LIBPATH']:
+ env.prepend_value('LIBPATH', tmp_path)
+
+
+ # add ancestors uselib too - but only propagate those that have no staticlib
+ for v in self.to_list(y.uselib):
+ if v not in seen_uselib:
+ seen_uselib.add(v)
+ if not env['STATICLIB_' + v]:
+ if not v in self.uselib:
+ self.uselib.insert(0, v)
+
+ # 2. the case of the libs defined outside
+ for x in self.uselib:
+ for v in self.p_flag_vars:
+ val = self.env[v + '_' + x]
+ if val:
+ self.env.append_value(v, val)
+
+@feature('cprogram', 'cshlib', 'cstaticlib')
+@after('apply_lib_vars')
+@before('apply_obj_vars')
+def samba_before_apply_obj_vars(self):
+ """before apply_obj_vars for uselib, this removes the standard pathes"""
+
+ def is_standard_libpath(env, path):
+ for _path in env.STANDARD_LIBPATH:
+ if _path == os.path.normpath(path):
+ return True
+ return False
+
+ v = self.env
+
+ for i in v['RPATH']:
+ if is_standard_libpath(v, i):
+ v['RPATH'].remove(i)
+
+ for i in v['LIBPATH']:
+ if is_standard_libpath(v, i):
+ v['LIBPATH'].remove(i)
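Most of the wins in this file come from one pattern: monkey-patch a hot waf
method with a version that memoises its result on a cheap key such as an
object id or an attribute tuple. A reduced sketch of the idea outside waf
(the function body is a stand-in for calls like bldpath()/srcpath()):

    def memoise_on_id(fn):
        cache = {}
        def wrapper(obj, *args):
            key = (id(obj),) + args
            try:
                return cache[key]
            except KeyError:
                cache[key] = fn(obj, *args)
                return cache[key]
        return wrapper

    @memoise_on_id
    def include_flags(node, variant):
        return ['-I%s/%s' % (node, variant)]

As with the bld.kak and bld.cac dictionaries above, id()-based keys are only
safe because the cached objects live for the whole build run.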
diff --git a/buildtools/wafsamba/samba_patterns.py b/buildtools/wafsamba/samba_patterns.py
new file mode 100644
index 0000000..b4427d3
--- /dev/null
+++ b/buildtools/wafsamba/samba_patterns.py
@@ -0,0 +1,210 @@
+# a waf tool to add extension based build patterns for Samba
+
+import Task
+from TaskGen import extension
+from samba_utils import *
+from wafsamba import samba_version_file
+
+def write_version_header(task):
+ '''print version.h contents'''
+ src = task.inputs[0].srcpath(task.env)
+ tgt = task.outputs[0].bldpath(task.env)
+
+ version = samba_version_file(src, task.env.srcdir, env=task.env, is_install=task.env.is_install)
+ string = str(version)
+
+ f = open(tgt, 'w')
+ f.write(string)
+ f.close()
+ return 0
+
+
+def SAMBA_MKVERSION(bld, target):
+ '''generate the version.h header for Samba'''
+
+ # We only force waf to re-generate this file if we are installing,
+ # because only then is information not included in the deps (the
+ # git revision) included in the version.
+ t = bld.SAMBA_GENERATOR('VERSION',
+ rule=write_version_header,
+ source= 'VERSION',
+ target=target,
+ always=bld.is_install)
+ t.env.is_install = bld.is_install
+Build.BuildContext.SAMBA_MKVERSION = SAMBA_MKVERSION
+
+
+def write_build_options_header(fp):
+ '''write preamble for build_options.c'''
+ fp.write("/*\n")
+ fp.write(" Unix SMB/CIFS implementation.\n")
+ fp.write(" Build Options for Samba Suite\n")
+ fp.write(" Copyright (C) Vance Lankhaar <vlankhaar@linux.ca> 2003\n")
+ fp.write(" Copyright (C) Andrew Bartlett <abartlet@samba.org> 2001\n")
+ fp.write("\n")
+ fp.write(" This program is free software; you can redistribute it and/or modify\n")
+ fp.write(" it under the terms of the GNU General Public License as published by\n")
+ fp.write(" the Free Software Foundation; either version 3 of the License, or\n")
+ fp.write(" (at your option) any later version.\n")
+ fp.write("\n")
+ fp.write(" This program is distributed in the hope that it will be useful,\n")
+ fp.write(" but WITHOUT ANY WARRANTY; without even the implied warranty of\n")
+ fp.write(" MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n")
+ fp.write(" GNU General Public License for more details.\n")
+ fp.write("\n")
+ fp.write(" You should have received a copy of the GNU General Public License\n")
+ fp.write(" along with this program; if not, see <http://www.gnu.org/licenses/>.\n")
+ fp.write("*/\n")
+ fp.write("\n")
+ fp.write("#include \"includes.h\"\n")
+ fp.write("#include \"build_env.h\"\n")
+ fp.write("#include \"dynconfig/dynconfig.h\"\n")
+ fp.write("#include \"lib/cluster_support.h\"\n")
+
+ fp.write("\n")
+ fp.write("static int output(bool screen, const char *format, ...) PRINTF_ATTRIBUTE(2,3);\n")
+ fp.write("void build_options(bool screen);\n")
+ fp.write("\n")
+ fp.write("\n")
+ fp.write("/****************************************************************************\n")
+ fp.write("helper function for build_options\n")
+ fp.write("****************************************************************************/\n")
+ fp.write("static int output(bool screen, const char *format, ...)\n")
+ fp.write("{\n")
+ fp.write(" char *ptr = NULL;\n")
+ fp.write(" int ret = 0;\n")
+ fp.write(" va_list ap;\n")
+ fp.write(" \n")
+ fp.write(" va_start(ap, format);\n")
+ fp.write(" ret = vasprintf(&ptr,format,ap);\n")
+ fp.write(" va_end(ap);\n")
+ fp.write("\n")
+ fp.write(" if (screen) {\n")
+ fp.write(" d_printf(\"%s\", ptr ? ptr : \"\");\n")
+ fp.write(" } else {\n")
+ fp.write(" DEBUG(4,(\"%s\", ptr ? ptr : \"\"));\n")
+ fp.write(" }\n")
+ fp.write(" \n")
+ fp.write(" SAFE_FREE(ptr);\n")
+ fp.write(" return ret;\n")
+ fp.write("}\n")
+ fp.write("\n")
+ fp.write("/****************************************************************************\n")
+ fp.write("options set at build time for the samba suite\n")
+ fp.write("****************************************************************************/\n")
+ fp.write("void build_options(bool screen)\n")
+ fp.write("{\n")
+ fp.write(" if ((DEBUGLEVEL < 4) && (!screen)) {\n")
+ fp.write(" return;\n")
+ fp.write(" }\n")
+ fp.write("\n")
+ fp.write("#ifdef _BUILD_ENV_H\n")
+ fp.write(" /* Output information about the build environment */\n")
+ fp.write(" output(screen,\"Build environment:\\n\");\n")
+ fp.write(" output(screen,\" Built by: %s@%s\\n\",BUILD_ENV_USER,BUILD_ENV_HOST);\n")
+ fp.write(" output(screen,\" Built on: %s\\n\",BUILD_ENV_DATE);\n")
+ fp.write("\n")
+ fp.write(" output(screen,\" Built using: %s\\n\",BUILD_ENV_COMPILER);\n")
+ fp.write(" output(screen,\" Build host: %s\\n\",BUILD_ENV_UNAME);\n")
+ fp.write(" output(screen,\" SRCDIR: %s\\n\",BUILD_ENV_SRCDIR);\n")
+ fp.write(" output(screen,\" BUILDDIR: %s\\n\",BUILD_ENV_BUILDDIR);\n")
+ fp.write("\n")
+ fp.write("\n")
+ fp.write("#endif\n")
+ fp.write("\n")
+ fp.write(" /* Output various paths to files and directories */\n")
+ fp.write(" output(screen,\"\\nPaths:\\n\");\n")
+ fp.write(" output(screen,\" SBINDIR: %s\\n\", get_dyn_SBINDIR());\n")
+ fp.write(" output(screen,\" BINDIR: %s\\n\", get_dyn_BINDIR());\n")
+ fp.write(" output(screen,\" CONFIGFILE: %s\\n\", get_dyn_CONFIGFILE());\n")
+ fp.write(" output(screen,\" LOGFILEBASE: %s\\n\", get_dyn_LOGFILEBASE());\n")
+ fp.write(" output(screen,\" LMHOSTSFILE: %s\\n\",get_dyn_LMHOSTSFILE());\n")
+ fp.write(" output(screen,\" LIBDIR: %s\\n\",get_dyn_LIBDIR());\n")
+ fp.write(" output(screen,\" MODULESDIR: %s\\n\",get_dyn_MODULESDIR());\n")
+ fp.write(" output(screen,\" SHLIBEXT: %s\\n\",get_dyn_SHLIBEXT());\n")
+ fp.write(" output(screen,\" LOCKDIR: %s\\n\",get_dyn_LOCKDIR());\n")
+ fp.write(" output(screen,\" STATEDIR: %s\\n\",get_dyn_STATEDIR());\n")
+ fp.write(" output(screen,\" CACHEDIR: %s\\n\",get_dyn_CACHEDIR());\n")
+ fp.write(" output(screen,\" PIDDIR: %s\\n\", get_dyn_PIDDIR());\n")
+ fp.write(" output(screen,\" SMB_PASSWD_FILE: %s\\n\",get_dyn_SMB_PASSWD_FILE());\n")
+ fp.write(" output(screen,\" PRIVATE_DIR: %s\\n\",get_dyn_PRIVATE_DIR());\n")
+ fp.write("\n")
+
+def write_build_options_footer(fp):
+ fp.write(" /* Output the sizes of the various cluster features */\n")
+ fp.write(" output(screen, \"\\n%s\", cluster_support_features());\n")
+ fp.write("\n")
+ fp.write(" /* Output the sizes of the various types */\n")
+ fp.write(" output(screen, \"\\nType sizes:\\n\");\n")
+ fp.write(" output(screen, \" sizeof(char): %lu\\n\",(unsigned long)sizeof(char));\n")
+ fp.write(" output(screen, \" sizeof(int): %lu\\n\",(unsigned long)sizeof(int));\n")
+ fp.write(" output(screen, \" sizeof(long): %lu\\n\",(unsigned long)sizeof(long));\n")
+ fp.write(" output(screen, \" sizeof(long long): %lu\\n\",(unsigned long)sizeof(long long));\n")
+ fp.write(" output(screen, \" sizeof(uint8): %lu\\n\",(unsigned long)sizeof(uint8));\n")
+ fp.write(" output(screen, \" sizeof(uint16): %lu\\n\",(unsigned long)sizeof(uint16));\n")
+ fp.write(" output(screen, \" sizeof(uint32): %lu\\n\",(unsigned long)sizeof(uint32));\n")
+ fp.write(" output(screen, \" sizeof(short): %lu\\n\",(unsigned long)sizeof(short));\n")
+ fp.write(" output(screen, \" sizeof(void*): %lu\\n\",(unsigned long)sizeof(void*));\n")
+ fp.write(" output(screen, \" sizeof(size_t): %lu\\n\",(unsigned long)sizeof(size_t));\n")
+ fp.write(" output(screen, \" sizeof(off_t): %lu\\n\",(unsigned long)sizeof(off_t));\n")
+ fp.write(" output(screen, \" sizeof(ino_t): %lu\\n\",(unsigned long)sizeof(ino_t));\n")
+ fp.write(" output(screen, \" sizeof(dev_t): %lu\\n\",(unsigned long)sizeof(dev_t));\n")
+ fp.write("\n")
+ fp.write(" output(screen, \"\\nBuiltin modules:\\n\");\n")
+ fp.write(" output(screen, \" %s\\n\", STRING_STATIC_MODULES);\n")
+ fp.write("}\n")
+
+def write_build_options_section(fp, keys, section):
+ fp.write("\n\t/* Show %s */\n" % section)
+ fp.write(" output(screen, \"\\n%s:\\n\");\n\n" % section)
+
+ for k in sorted(keys):
+ fp.write("#ifdef %s\n" % k)
+ fp.write(" output(screen, \" %s\\n\");\n" % k)
+ fp.write("#endif\n")
+ fp.write("\n")
+
+def write_build_options(task):
+ tbl = task.env['defines']
+ keys_option_with = []
+ keys_option_utmp = []
+ keys_option_have = []
+ keys_header_sys = []
+ keys_header_other = []
+ keys_misc = []
+ for key in tbl:
+ if key.startswith("HAVE_UT_UT_") or key.find("UTMP") >= 0:
+ keys_option_utmp.append(key)
+ elif key.startswith("WITH_"):
+ keys_option_with.append(key)
+ elif key.startswith("HAVE_SYS_"):
+ keys_header_sys.append(key)
+ elif key.startswith("HAVE_"):
+ if key.endswith("_H"):
+ keys_header_other.append(key)
+ else:
+ keys_option_have.append(key)
+ else:
+ keys_misc.append(key)
+
+ tgt = task.outputs[0].bldpath(task.env)
+ f = open(tgt, 'w')
+ write_build_options_header(f)
+ write_build_options_section(f, keys_header_sys, "System Headers")
+ write_build_options_section(f, keys_header_other, "Headers")
+ write_build_options_section(f, keys_option_utmp, "UTMP Options")
+ write_build_options_section(f, keys_option_have, "HAVE_* Defines")
+ write_build_options_section(f, keys_option_with, "--with Options")
+ write_build_options_section(f, keys_misc, "Build Options")
+ write_build_options_footer(f)
+ f.close()
+ return 0
+
+
+def SAMBA_BLDOPTIONS(bld, target):
+ '''generate the bld_options.c for Samba'''
+ t = bld.SAMBA_GENERATOR(target,
+ rule=write_build_options,
+ dep_vars=['defines'],
+ target=target)
+Build.BuildContext.SAMBA_BLDOPTIONS = SAMBA_BLDOPTIONS
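write_build_options() buckets every configure define by prefix before
emitting the sections of build_options.c. A standalone sketch of that
classification with made-up define names (note the UTMP test deliberately
runs before the generic HAVE_ tests, so HAVE_UTMP_H lands in the UTMP
bucket):

    def classify(keys):
        groups = {'utmp': [], 'with': [], 'sys_headers': [],
                  'headers': [], 'have': [], 'misc': []}
        for key in keys:
            if key.startswith('HAVE_UT_UT_') or 'UTMP' in key:
                groups['utmp'].append(key)
            elif key.startswith('WITH_'):
                groups['with'].append(key)
            elif key.startswith('HAVE_SYS_'):
                groups['sys_headers'].append(key)
            elif key.startswith('HAVE_'):
                bucket = 'headers' if key.endswith('_H') else 'have'
                groups[bucket].append(key)
            else:
                groups['misc'].append(key)
        return groups

    print(classify(['WITH_ADS', 'HAVE_SYS_STAT_H', 'HAVE_UTMP_H', 'HAVE_EPOLL']))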
diff --git a/buildtools/wafsamba/samba_perl.py b/buildtools/wafsamba/samba_perl.py
new file mode 100644
index 0000000..3909aba
--- /dev/null
+++ b/buildtools/wafsamba/samba_perl.py
@@ -0,0 +1,62 @@
+import Build
+from samba_utils import *
+from Configure import conf
+
+done = {}
+
+@conf
+def SAMBA_CHECK_PERL(conf, mandatory=True, version=(5,0,0)):
+ #
+ # TODO: use the @runonce mechanism for this.
+ # The problem is that @runonce currently does
+ # not seem to work together with @conf...
+ # So @runonce (and/or) @conf needs fixing.
+ #
+ if "done" in done:
+ return
+ done["done"] = True
+ conf.find_program('perl', var='PERL', mandatory=mandatory)
+ conf.check_tool('perl')
+ path_perl = conf.find_program('perl')
+ conf.env.PERL_SPECIFIED = (conf.env.PERL != path_perl)
+ conf.check_perl_version(version)
+
+ def read_perl_config_var(cmd):
+ return Utils.to_list(Utils.cmd_output([conf.env.PERL, '-MConfig', '-e', cmd]))
+
+ def check_perl_config_var(var):
+ conf.start_msg("Checking for perl $Config{%s}:" % var)
+ try:
+ v = read_perl_config_var('print $Config{%s}' % var)[0]
+ conf.end_msg("'%s'" % (v), 'GREEN')
+ return v
+ except IndexError:
+ conf.end_msg(False, 'YELLOW')
+ return None
+
+ vendor_prefix = check_perl_config_var('vendorprefix')
+
+ perl_arch_install_dir = None
+ if vendor_prefix == conf.env.PREFIX:
+ perl_arch_install_dir = check_perl_config_var('vendorarch')
+ if perl_arch_install_dir is None:
+ perl_arch_install_dir = "${LIBDIR}/perl5"
+ conf.start_msg("PERL_ARCH_INSTALL_DIR: ")
+ conf.end_msg("'%s'" % (perl_arch_install_dir), 'GREEN')
+ conf.env.PERL_ARCH_INSTALL_DIR = perl_arch_install_dir
+
+ perl_lib_install_dir = None
+ if vendor_prefix == conf.env.PREFIX:
+ perl_lib_install_dir = check_perl_config_var('vendorlib')
+ if perl_lib_install_dir is None:
+ perl_lib_install_dir = "${DATADIR}/perl5"
+ conf.start_msg("PERL_LIB_INSTALL_DIR: ")
+ conf.end_msg("'%s'" % (perl_lib_install_dir), 'GREEN')
+ conf.env.PERL_LIB_INSTALL_DIR = perl_lib_install_dir
+
+ perl_inc = read_perl_config_var('print "@INC"')
+ perl_inc.remove('.')
+ conf.start_msg("PERL_INC: ")
+ conf.end_msg("%s" % (perl_inc), 'GREEN')
+ conf.env.PERL_INC = perl_inc
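The $Config queries above are easy to reproduce outside waf; a sketch using
subprocess in place of Utils.cmd_output (key names as in the checks above):

    import subprocess

    def perl_config(var):
        out = subprocess.check_output(
            ['perl', '-MConfig', '-e', 'print $Config{%s}' % var])
        return out.decode('utf-8') or None

    # e.g. perl_config('vendorprefix'), perl_config('vendorarch')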
diff --git a/buildtools/wafsamba/samba_pidl.py b/buildtools/wafsamba/samba_pidl.py
new file mode 100644
index 0000000..110b15e
--- /dev/null
+++ b/buildtools/wafsamba/samba_pidl.py
@@ -0,0 +1,145 @@
+# waf build tool for building IDL files with pidl
+
+from TaskGen import before
+import Build, os, sys, Logs
+from samba_utils import *
+
+def SAMBA_PIDL(bld, pname, source,
+ options='',
+ output_dir='.',
+ generate_tables=True):
+ '''Build an IDL file using pidl.
+ This will produce up to 13 output files depending on the options used'''
+
+ bname = source[0:-4] # strip off the .idl suffix
+ bname = os.path.basename(bname)
+ name = "%s_%s" % (pname, bname.upper())
+
+ if not SET_TARGET_TYPE(bld, name, 'PIDL'):
+ return
+
+ bld.SET_BUILD_GROUP('build_source')
+
+ # the output files depend on the options used. Use this dictionary
+ # to map between the options and the resulting file names
+ options_map = { '--header' : '%s.h',
+ '--ndr-parser' : 'ndr_%s.c ndr_%s.h',
+ '--samba3-ndr-server' : 'srv_%s.c srv_%s.h',
+ '--samba3-ndr-client' : 'cli_%s.c cli_%s.h',
+ '--server' : 'ndr_%s_s.c',
+ '--client' : 'ndr_%s_c.c ndr_%s_c.h',
+ '--python' : 'py_%s.c',
+ '--tdr-parser' : 'tdr_%s.c tdr_%s.h',
+ '--dcom-proxy' : '%s_p.c',
+ '--com-header' : 'com_%s.h'
+ }
+
+ table_header_idx = None
+ out_files = []
+ options_list = TO_LIST(options)
+
+ for o in options_list:
+ if o in options_map:
+ ofiles = TO_LIST(options_map[o])
+ for f in ofiles:
+ out_files.append(os.path.join(output_dir, f % bname))
+ if f == 'ndr_%s.h':
+ # remember this one for the tables generation
+ table_header_idx = len(out_files) - 1
+
+ # depend on the full pidl sources
+ source = TO_LIST(source)
+ try:
+ pidl_src_nodes = bld.pidl_files_cache
+ except AttributeError:
+ bld.pidl_files_cache = bld.srcnode.ant_glob('pidl/lib/Parse/**/*.pm', flat=False)
+ bld.pidl_files_cache.extend(bld.srcnode.ant_glob('pidl', flat=False))
+ pidl_src_nodes = bld.pidl_files_cache
+
+ # the cd .. is needed because pidl currently is sensitive to the directory it is run in
+ cpp = ""
+ cc = ""
+ if bld.CONFIG_SET("CPP") and bld.CONFIG_GET("CPP") != "":
+ if isinstance(bld.CONFIG_GET("CPP"), list):
+ cpp = 'CPP="%s"' % " ".join(bld.CONFIG_GET("CPP"))
+ else:
+ cpp = 'CPP="%s"' % bld.CONFIG_GET("CPP")
+
+ if cpp == "CPP=xlc_r":
+ cpp = ""
+
+
+ if bld.CONFIG_SET("CC"):
+ if isinstance(bld.CONFIG_GET("CC"), list):
+ cc = 'CC="%s"' % " ".join(bld.CONFIG_GET("CC"))
+ else:
+ cc = 'CC="%s"' % bld.CONFIG_GET("CC")
+
+ t = bld(rule='cd .. && %s %s ${PERL} "${PIDL}" --quiet ${OPTIONS} --outputdir ${OUTPUTDIR} -- "${SRC[0].abspath(env)}"' % (cpp, cc),
+ ext_out = '.c',
+ before = 'cc',
+ update_outputs = True,
+ shell = True,
+ source = source,
+ target = out_files,
+ name = name,
+ samba_type = 'PIDL')
+
+ # prime the list of nodes we are dependent on with the cached pidl sources
+ t.allnodes = pidl_src_nodes
+
+ t.env.PIDL = os.path.join(bld.srcnode.abspath(), 'pidl/pidl')
+ t.env.OPTIONS = TO_LIST(options)
+ t.env.OUTPUTDIR = bld.bldnode.name + '/' + bld.path.find_dir(output_dir).bldpath(t.env)
+
+ if generate_tables and table_header_idx is not None:
+ pidl_headers = LOCAL_CACHE(bld, 'PIDL_HEADERS')
+ pidl_headers[name] = [bld.path.find_or_declare(out_files[table_header_idx])]
+
+ t.more_includes = '#' + bld.path.relpath_gen(bld.srcnode)
+Build.BuildContext.SAMBA_PIDL = SAMBA_PIDL
+
+
+def SAMBA_PIDL_LIST(bld, name, source,
+ options='',
+ output_dir='.',
+ generate_tables=True):
+ '''A wrapper for building a set of IDL files'''
+ for p in TO_LIST(source):
+ bld.SAMBA_PIDL(name, p, options=options, output_dir=output_dir, generate_tables=generate_tables)
+Build.BuildContext.SAMBA_PIDL_LIST = SAMBA_PIDL_LIST
+
+
+#################################################################
+# the rule for generating the NDR tables
+from TaskGen import feature, before
+@feature('collect')
+@before('exec_rule')
+def collect(self):
+ pidl_headers = LOCAL_CACHE(self.bld, 'PIDL_HEADERS')
+ for (name, hd) in pidl_headers.items():
+ y = self.bld.name_to_obj(name, self.env)
+ self.bld.ASSERT(y is not None, 'Failed to find PIDL header %s' % name)
+ y.post()
+ for node in hd:
+ self.bld.ASSERT(node is not None, 'Got None as build node generating PIDL table for %s' % name)
+ self.source += " " + node.relpath_gen(self.path)
+
+
+def SAMBA_PIDL_TABLES(bld, name, target):
+ '''generate the pidl NDR tables file'''
+ headers = bld.env.PIDL_HEADERS
+ bld.SET_BUILD_GROUP('main')
+ t = bld(
+ features = 'collect',
+ rule = '${PERL} ${SRC} --output ${TGT} | sed "s|default/||" > ${TGT}',
+ ext_out = '.c',
+ before = 'cc',
+ update_outputs = True,
+ shell = True,
+ source = '../../librpc/tables.pl',
+ target = target,
+ name = name)
+ t.env.LIBRPC = os.path.join(bld.srcnode.abspath(), 'librpc')
+Build.BuildContext.SAMBA_PIDL_TABLES = SAMBA_PIDL_TABLES
+
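The options_map expansion above determines exactly which files a pidl
invocation is expected to produce. A standalone sketch of that expansion
(abbreviated map, illustrative IDL path):

    import os

    options_map = {'--header': '%s.h',
                   '--ndr-parser': 'ndr_%s.c ndr_%s.h',
                   '--client': 'ndr_%s_c.c ndr_%s_c.h'}

    def pidl_outputs(idl, options, output_dir='.'):
        bname = os.path.basename(idl)[:-4]   # strip the .idl suffix
        out = []
        for o in options:
            for pat in options_map.get(o, '').split():
                out.append(os.path.join(output_dir, pat % bname))
        return out

    print(pidl_outputs('librpc/idl/echo.idl', ['--header', '--ndr-parser']))
    # ['./echo.h', './ndr_echo.c', './ndr_echo.h']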
diff --git a/buildtools/wafsamba/samba_python.py b/buildtools/wafsamba/samba_python.py
new file mode 100644
index 0000000..a371b43
--- /dev/null
+++ b/buildtools/wafsamba/samba_python.py
@@ -0,0 +1,71 @@
+# waf build tool for building python extensions for Samba
+
+import Build
+from samba_utils import *
+from samba_autoconf import *
+
+from Configure import conf
+
+@conf
+def SAMBA_CHECK_PYTHON(conf, mandatory=True, version=(2,4,2)):
+ # enable tool to build python extensions
+ conf.find_program('python', var='PYTHON', mandatory=mandatory)
+ conf.check_tool('python')
+ path_python = conf.find_program('python')
+ conf.env.PYTHON_SPECIFIED = (conf.env.PYTHON != path_python)
+ conf.check_python_version(version)
+
+@conf
+def SAMBA_CHECK_PYTHON_HEADERS(conf, mandatory=True):
+ if conf.env["python_headers_checked"] == []:
+ conf.check_python_headers(mandatory)
+ conf.env["python_headers_checked"] = "yes"
+ else:
+ conf.msg("python headers", "using cache")
+
+
+def SAMBA_PYTHON(bld, name,
+ source='',
+ deps='',
+ public_deps='',
+ realname=None,
+ cflags='',
+ includes='',
+ init_function_sentinel=None,
+ local_include=True,
+ vars=None,
+ install=True,
+ enabled=True):
+ '''build a python extension for Samba'''
+
+ # when we support static python modules we'll need to gather
+ # the list from all the SAMBA_PYTHON() targets
+ if init_function_sentinel is not None:
+ cflags += '-DSTATIC_LIBPYTHON_MODULES=%s' % init_function_sentinel
+
+ source = bld.EXPAND_VARIABLES(source, vars=vars)
+
+ if realname is not None:
+ link_name = 'python_modules/%s' % realname
+ else:
+ link_name = None
+
+ bld.SAMBA_LIBRARY(name,
+ source=source,
+ deps=deps,
+ public_deps=public_deps,
+ includes=includes,
+ cflags=cflags,
+ local_include=local_include,
+ vars=vars,
+ realname=realname,
+ link_name=link_name,
+ pyext=True,
+ target_type='PYTHON',
+ install_path='${PYTHONARCHDIR}',
+ allow_undefined_symbols=True,
+ allow_warnings=True,
+ install=install,
+ enabled=enabled)
+
+Build.BuildContext.SAMBA_PYTHON = SAMBA_PYTHON
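A hypothetical wscript usage of the wrapper, loosely following tdb's python
binding conventions (target and file names are illustrative):

    bld.SAMBA_PYTHON('pytdb',
                     source='pytdb.c',
                     deps='tdb',
                     realname='tdb.so')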
diff --git a/buildtools/wafsamba/samba_third_party.py b/buildtools/wafsamba/samba_third_party.py
new file mode 100644
index 0000000..46a1b94
--- /dev/null
+++ b/buildtools/wafsamba/samba_third_party.py
@@ -0,0 +1,35 @@
+# functions to support third party libraries
+
+from Configure import conf
+import sys, Logs, os
+from samba_bundled import *
+
+@conf
+def CHECK_FOR_THIRD_PARTY(conf):
+ return os.path.exists(os.path.join(Utils.g_module.srcdir, 'third_party'))
+
+Build.BuildContext.CHECK_FOR_THIRD_PARTY = CHECK_FOR_THIRD_PARTY
+
+@conf
+def CHECK_ZLIB(conf):
+ version_check='''
+ #if (ZLIB_VERNUM >= 0x1230)
+ #else
+ #error "ZLIB_VERNUM < 0x1230"
+ #endif
+ z_stream *z;
+ inflateInit2(z, -15);
+ '''
+ return conf.CHECK_BUNDLED_SYSTEM('z', minversion='1.2.3', pkg='zlib',
+ checkfunctions='zlibVersion',
+ headers='zlib.h',
+ checkcode=version_check,
+ implied_deps='replace')
+
+Build.BuildContext.CHECK_ZLIB = CHECK_ZLIB
+
+@conf
+def CHECK_POPT(conf):
+ return conf.CHECK_BUNDLED_SYSTEM('popt', checkfunctions='poptGetContext', headers='popt.h')
+
+Build.BuildContext.CHECK_POPT = CHECK_POPT
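The 0x1230 constant in the check code corresponds to the 1.2.3 minimum
version: ZLIB_VERNUM packs major/minor/revision into nibbles. A quick
sketch of the encoding:

    def zlib_vernum(major, minor, revision):
        # ZLIB_VERNUM is 0xMNRS: major, minor, revision, subrevision nibbles
        return (major << 12) | (minor << 8) | (revision << 4)

    assert zlib_vernum(1, 2, 3) == 0x1230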
diff --git a/buildtools/wafsamba/samba_utils.py b/buildtools/wafsamba/samba_utils.py
new file mode 100644
index 0000000..e8bc0f3
--- /dev/null
+++ b/buildtools/wafsamba/samba_utils.py
@@ -0,0 +1,668 @@
+# a waf tool to add autoconf-like macros to the configure section
+# and for SAMBA_ macros for building libraries, binaries etc
+
+import Build, os, sys, Options, Utils, Task, re, fnmatch, Logs
+from TaskGen import feature, before
+from Configure import conf, ConfigurationContext
+from Logs import debug
+import shlex
+
+# TODO: make this a --option
+LIB_PATH="shared"
+
+
+# sigh, python octal constants are a mess
+MODE_644 = int('644', 8)
+MODE_755 = int('755', 8)
+
+@conf
+def SET_TARGET_TYPE(ctx, target, value):
+ '''set the target type of a target'''
+ cache = LOCAL_CACHE(ctx, 'TARGET_TYPE')
+ if target in cache and cache[target] != 'EMPTY':
+ Logs.error("ERROR: Target '%s' in directory %s re-defined as %s - was %s" % (target, ctx.curdir, value, cache[target]))
+ sys.exit(1)
+ LOCAL_CACHE_SET(ctx, 'TARGET_TYPE', target, value)
+ debug("task_gen: Target '%s' created of type '%s' in %s" % (target, value, ctx.curdir))
+ return True
+
+
+def GET_TARGET_TYPE(ctx, target):
+ '''get target type from cache'''
+ cache = LOCAL_CACHE(ctx, 'TARGET_TYPE')
+ if not target in cache:
+ return None
+ return cache[target]
+
+
+######################################################
+# this is used as a decorator to make functions only
+# run once. Based on the idea from
+# http://stackoverflow.com/questions/815110/is-there-a-decorator-to-simply-cache-function-return-values
+def runonce(function):
+ runonce_ret = {}
+ def runonce_wrapper(*args):
+ if args in runonce_ret:
+ return runonce_ret[args]
+ else:
+ ret = function(*args)
+ runonce_ret[args] = ret
+ return ret
+ return runonce_wrapper
+
+
+def ADD_LD_LIBRARY_PATH(path):
+ '''add something to LD_LIBRARY_PATH'''
+ if 'LD_LIBRARY_PATH' in os.environ:
+ oldpath = os.environ['LD_LIBRARY_PATH']
+ else:
+ oldpath = ''
+ newpath = oldpath.split(':')
+ if not path in newpath:
+ newpath.append(path)
+ os.environ['LD_LIBRARY_PATH'] = ':'.join(newpath)
+
+
+def needs_private_lib(bld, target):
+ '''return True if a target links to a private library'''
+ for lib in getattr(target, "final_libs", []):
+ t = bld.name_to_obj(lib, bld.env)
+ if t and getattr(t, 'private_library', False):
+ return True
+ return False
+
+
+def install_rpath(target):
+ '''the rpath value for installation'''
+ bld = target.bld
+ bld.env['RPATH'] = []
+ ret = set()
+ if bld.env.RPATH_ON_INSTALL:
+ ret.add(bld.EXPAND_VARIABLES(bld.env.LIBDIR))
+ if bld.env.RPATH_ON_INSTALL_PRIVATE and needs_private_lib(bld, target):
+ ret.add(bld.EXPAND_VARIABLES(bld.env.PRIVATELIBDIR))
+ return list(ret)
+
+
+def build_rpath(bld):
+ '''the rpath value for build'''
+ rpaths = [os.path.normpath('%s/%s' % (bld.env.BUILD_DIRECTORY, d)) for d in ("shared", "shared/private")]
+ bld.env['RPATH'] = []
+ if bld.env.RPATH_ON_BUILD:
+ return rpaths
+ for rpath in rpaths:
+ ADD_LD_LIBRARY_PATH(rpath)
+ return []
+
+
+@conf
+def LOCAL_CACHE(ctx, name):
+ '''return a named build cache dictionary, used to store
+ state inside other functions'''
+ if name in ctx.env:
+ return ctx.env[name]
+ ctx.env[name] = {}
+ return ctx.env[name]
+
+
+@conf
+def LOCAL_CACHE_SET(ctx, cachename, key, value):
+ '''set a value in a local cache'''
+ cache = LOCAL_CACHE(ctx, cachename)
+ cache[key] = value
+
+
+@conf
+def ASSERT(ctx, expression, msg):
+ '''a build assert call'''
+ if not expression:
+ raise Utils.WafError("ERROR: %s\n" % msg)
+Build.BuildContext.ASSERT = ASSERT
+
+
+def SUBDIR(bld, subdir, list):
+ '''create a list of files by prepending each with a subdir name'''
+ ret = ''
+ for l in TO_LIST(list):
+ ret = ret + os.path.normpath(os.path.join(subdir, l)) + ' '
+ return ret
+Build.BuildContext.SUBDIR = SUBDIR
+
+
+def dict_concat(d1, d2):
+ '''concatenate two dictionaries d1 += d2'''
+ for t in d2:
+ if t not in d1:
+ d1[t] = d2[t]
+
+
+def exec_command(self, cmd, **kw):
+ '''this overrides the 'waf -v' debug output to be in a nice
+ unix-like format instead of a python list.
+ Thanks to ita on #waf for this'''
+ import Utils, Logs
+ _cmd = cmd
+ if isinstance(cmd, list):
+ _cmd = ' '.join(cmd)
+ debug('runner: %s' % _cmd)
+ if self.log:
+ self.log.write('%s\n' % cmd)
+ kw['log'] = self.log
+ try:
+ if not kw.get('cwd', None):
+ kw['cwd'] = self.cwd
+ except AttributeError:
+ self.cwd = kw['cwd'] = self.bldnode.abspath()
+ return Utils.exec_command(cmd, **kw)
+Build.BuildContext.exec_command = exec_command
+
+
+def ADD_COMMAND(opt, name, function):
+ '''add a new top level command to waf'''
+ Utils.g_module.__dict__[name] = function
+ opt.name = function
+Options.Handler.ADD_COMMAND = ADD_COMMAND
+
+
+@feature('cc', 'cshlib', 'cprogram')
+@before('apply_core','exec_rule')
+def process_depends_on(self):
+ '''The new depends_on attribute for build rules
+ allows us to specify a dependency on output from
+ a source generation rule'''
+ if getattr(self , 'depends_on', None):
+ lst = self.to_list(self.depends_on)
+ for x in lst:
+ y = self.bld.name_to_obj(x, self.env)
+ self.bld.ASSERT(y is not None, "Failed to find dependency %s of %s" % (x, self.name))
+ y.post()
+ if getattr(y, 'more_includes', None):
+ self.includes += " " + y.more_includes
+
+
+os_path_relpath = getattr(os.path, 'relpath', None)
+if os_path_relpath is None:
+ # Python < 2.6 does not have os.path.relpath, provide a replacement
+ # (imported from Python2.6.5~rc2)
+ def os_path_relpath(path, start):
+ """Return a relative version of a path"""
+ start_list = os.path.abspath(start).split("/")
+ path_list = os.path.abspath(path).split("/")
+
+ # Work out how much of the filepath is shared by start and path.
+ i = len(os.path.commonprefix([start_list, path_list]))
+
+ rel_list = ['..'] * (len(start_list)-i) + path_list[i:]
+ if not rel_list:
+ return start
+ return os.path.join(*rel_list)
+
+
+def unique_list(seq):
+ '''return a uniquified list in the same order as the existing list'''
+ seen = {}
+ result = []
+ for item in seq:
+ if item in seen: continue
+ seen[item] = True
+ result.append(item)
+ return result
+
+
+def TO_LIST(str, delimiter=None):
+ '''Split a list, preserving quoted strings and existing lists'''
+ if str is None:
+ return []
+ if isinstance(str, list):
+ # we need to return a new independent list...
+ return list(str)
+ if len(str) == 0:
+ return []
+ lst = str.split(delimiter)
+ # the string may have had quotes in it, now we
+ # check if we did have quotes, and use the slower shlex
+ # if we need to
+ for e in lst:
+ if e[0] == '"':
+ return shlex.split(str)
+ return lst
+
+
+def subst_vars_error(string, env):
+ '''substitute vars, throw an error if a variable is not defined'''
+ lst = re.split('(\$\{\w+\})', string)
+ out = []
+ for v in lst:
+ if re.match('\$\{\w+\}', v):
+ vname = v[2:-1]
+ if not vname in env:
+ raise KeyError("Failed to find variable %s in %s" % (vname, string))
+ v = env[vname]
+ out.append(v)
+ return ''.join(out)
+
+
+@conf
+def SUBST_ENV_VAR(ctx, varname):
+ '''Substitute an environment variable for any embedded variables'''
+ return subst_vars_error(ctx.env[varname], ctx.env)
+Build.BuildContext.SUBST_ENV_VAR = SUBST_ENV_VAR
+
+
+def ENFORCE_GROUP_ORDERING(bld):
+ '''enforce group ordering for the project. This
+ makes the group ordering apply only when you specify
+ a target with --target'''
+ if Options.options.compile_targets:
+ @feature('*')
+ @before('exec_rule', 'apply_core', 'collect')
+ def force_previous_groups(self):
+ if getattr(self.bld, 'enforced_group_ordering', False):
+ return
+ self.bld.enforced_group_ordering = True
+
+ def group_name(g):
+ tm = self.bld.task_manager
+ return [x for x in tm.groups_names if id(tm.groups_names[x]) == id(g)][0]
+
+ my_id = id(self)
+ bld = self.bld
+ stop = None
+ for g in bld.task_manager.groups:
+ for t in g.tasks_gen:
+ if id(t) == my_id:
+ stop = id(g)
+ debug('group: Forcing up to group %s for target %s',
+ group_name(g), self.name or self.target)
+ break
+ if stop is not None:
+ break
+ if stop is None:
+ return
+
+ for i in xrange(len(bld.task_manager.groups)):
+ g = bld.task_manager.groups[i]
+ bld.task_manager.current_group = i
+ if id(g) == stop:
+ break
+ debug('group: Forcing group %s', group_name(g))
+ for t in g.tasks_gen:
+ if not getattr(t, 'forced_groups', False):
+ debug('group: Posting %s', t.name or t.target)
+ t.forced_groups = True
+ t.post()
+Build.BuildContext.ENFORCE_GROUP_ORDERING = ENFORCE_GROUP_ORDERING
+
+
+def recursive_dirlist(dir, relbase, pattern=None):
+ '''recursive directory list'''
+ ret = []
+ for f in os.listdir(dir):
+ f2 = dir + '/' + f
+ if os.path.isdir(f2):
+ ret.extend(recursive_dirlist(f2, relbase, pattern=pattern))
+ else:
+ if pattern and not fnmatch.fnmatch(f, pattern):
+ continue
+ ret.append(os_path_relpath(f2, relbase))
+ return ret
+
+
+def mkdir_p(dir):
+ '''like mkdir -p'''
+ if not dir:
+ return
+ if dir.endswith("/"):
+ mkdir_p(dir[:-1])
+ return
+ if os.path.isdir(dir):
+ return
+ mkdir_p(os.path.dirname(dir))
+ os.mkdir(dir)
+
+
+def SUBST_VARS_RECURSIVE(string, env):
+ '''recursively expand variables'''
+ if string is None:
+ return string
+ limit=100
+ while (string.find('${') != -1 and limit > 0):
+ string = subst_vars_error(string, env)
+ limit -= 1
+ return string
+
+
+@conf
+def EXPAND_VARIABLES(ctx, varstr, vars=None):
+ '''expand variables from a user supplied dictionary
+
+ This is most useful when you pass vars=locals() to expand
+ all your local variables in strings
+ '''
+
+ if isinstance(varstr, list):
+ ret = []
+ for s in varstr:
+ ret.append(EXPAND_VARIABLES(ctx, s, vars=vars))
+ return ret
+
+ if not isinstance(varstr, str):
+ return varstr
+
+ import Environment
+ env = Environment.Environment()
+ ret = varstr
+ # substitute on user supplied dict if available
+ if vars is not None:
+ for v in vars.keys():
+ env[v] = vars[v]
+ ret = SUBST_VARS_RECURSIVE(ret, env)
+
+ # if anything left, subst on the environment as well
+ if ret.find('${') != -1:
+ ret = SUBST_VARS_RECURSIVE(ret, ctx.env)
+ # make sure there is nothing left. Also check for the common
+ # typo of $( instead of ${
+ if ret.find('${') != -1 or ret.find('$(') != -1:
+ Logs.error('Failed to substitute all variables in varstr=%s' % ret)
+ sys.exit(1)
+ return ret
+Build.BuildContext.EXPAND_VARIABLES = EXPAND_VARIABLES
+
+
+def RUN_COMMAND(cmd,
+ env=None,
+ shell=False):
+ '''run an external command, return exit code or signal'''
+ if env:
+ cmd = SUBST_VARS_RECURSIVE(cmd, env)
+
+ status = os.system(cmd)
+ if os.WIFEXITED(status):
+ return os.WEXITSTATUS(status)
+ if os.WIFSIGNALED(status):
+ return - os.WTERMSIG(status)
+ Logs.error("Unknown exit reason %d for command: %s" (status, cmd))
+ return -1
+
+
+# make sure we have md5. some systems don't have it
+try:
+ from hashlib import md5
+ # Even if hashlib.md5 exists, it may be unusable.
+ # Try to use MD5 function. In FIPS mode this will cause an exception
+ # and we'll get to the replacement code
+ foo = md5('abcd')
+except:
+ try:
+ import md5
+ # repeat the same check here, mere success of import is not enough.
+ # Try to use MD5 function. In FIPS mode this will cause an exception
+ foo = md5.md5('abcd')
+ except:
+ import Constants
+ Constants.SIG_NIL = hash('abcd')
+ class replace_md5(object):
+ def __init__(self):
+ self.val = None
+ def update(self, val):
+ self.val = hash((self.val, val))
+ def digest(self):
+ return str(self.val)
+ def hexdigest(self):
+ return self.digest().encode('hex')
+ def replace_h_file(filename):
+ f = open(filename, 'rb')
+ m = replace_md5()
+ data = f.read(100000)
+ while data:
+ m.update(data)
+ data = f.read(100000)
+ f.close()
+ return m.digest()
+ Utils.md5 = replace_md5
+ Task.md5 = replace_md5
+ Utils.h_file = replace_h_file
+
+
+def LOAD_ENVIRONMENT():
+ '''load the configuration environment, allowing access to env vars
+ from new commands'''
+ import Environment
+ env = Environment.Environment()
+ try:
+ env.load('.lock-wscript')
+ env.load(env.blddir + '/c4che/default.cache.py')
+ except:
+ pass
+ return env
+
+
+def IS_NEWER(bld, file1, file2):
+ '''return True if file1 is newer than file2'''
+ t1 = os.stat(os.path.join(bld.curdir, file1)).st_mtime
+ t2 = os.stat(os.path.join(bld.curdir, file2)).st_mtime
+ return t1 > t2
+Build.BuildContext.IS_NEWER = IS_NEWER
+
+
+@conf
+def RECURSE(ctx, directory):
+ '''recurse into a directory, relative to the curdir or top level'''
+ try:
+ visited_dirs = ctx.visited_dirs
+ except:
+ visited_dirs = ctx.visited_dirs = set()
+ d = os.path.join(ctx.curdir, directory)
+ if os.path.exists(d):
+ abspath = os.path.abspath(d)
+ else:
+ abspath = os.path.abspath(os.path.join(Utils.g_module.srcdir, directory))
+ ctxclass = ctx.__class__.__name__
+ key = ctxclass + ':' + abspath
+ if key in visited_dirs:
+ # already done it
+ return
+ visited_dirs.add(key)
+ relpath = os_path_relpath(abspath, ctx.curdir)
+ if ctxclass == 'Handler':
+ return ctx.sub_options(relpath)
+ if ctxclass == 'ConfigurationContext':
+ return ctx.sub_config(relpath)
+ if ctxclass == 'BuildContext':
+ return ctx.add_subdirs(relpath)
+ raise Utils.WafError('Unknown RECURSE context class: %s' % ctxclass)
+Options.Handler.RECURSE = RECURSE
+Build.BuildContext.RECURSE = RECURSE
+
+
+def CHECK_MAKEFLAGS(bld):
+ '''check for the MAKEFLAGS environment variable, in case we are being
+ called from a Makefile, and try to honor a few make command line flags'''
+ if not 'WAF_MAKE' in os.environ:
+ return
+ makeflags = os.environ.get('MAKEFLAGS')
+ if makeflags is None:
+ return
+ jobs_set = False
+ # we need to use shlex.split to cope with the escaping of spaces
+ # in makeflags
+ for opt in shlex.split(makeflags):
+ # options can come either as -x or as x
+ if opt[0:2] == 'V=':
+ Options.options.verbose = Logs.verbose = int(opt[2:])
+ if Logs.verbose > 0:
+ Logs.zones = ['runner']
+ if Logs.verbose > 2:
+ Logs.zones = ['*']
+ elif opt[0].isupper() and opt.find('=') != -1:
+ # this allows us to set waf options on the make command line
+ # for example, if you do "make FOO=blah", then we set the
+ # option 'FOO' in Options.options, to blah. If you look in wafsamba/wscript
+ # you will see that the command line accessible options have their dest=
+ # set to uppercase, to allow for passing of options from make in this way
+ # this is also how "make test TESTS=testpattern" works, and
+ # "make VERBOSE=1" as well as things like "make SYMBOLCHECK=1"
+ loc = opt.find('=')
+ setattr(Options.options, opt[0:loc], opt[loc+1:])
+ elif opt[0] != '-':
+ for v in opt:
+ if v == 'j':
+ jobs_set = True
+ elif v == 'k':
+ Options.options.keep = True
+ elif opt == '-j':
+ jobs_set = True
+ elif opt == '-k':
+ Options.options.keep = True
+ if not jobs_set:
+ # default to one job
+ Options.options.jobs = 1
+
+Build.BuildContext.CHECK_MAKEFLAGS = CHECK_MAKEFLAGS
+
+option_groups = {}
+
+def option_group(opt, name):
+ '''find or create an option group'''
+ global option_groups
+ if name in option_groups:
+ return option_groups[name]
+ gr = opt.add_option_group(name)
+ option_groups[name] = gr
+ return gr
+Options.Handler.option_group = option_group
+
+
+def save_file(filename, contents, create_dir=False):
+ '''save data to a file'''
+ if create_dir:
+ mkdir_p(os.path.dirname(filename))
+ try:
+ f = open(filename, 'w')
+ f.write(contents)
+ f.close()
+ except:
+ return False
+ return True
+
+
+def load_file(filename):
+ '''return contents of a file'''
+ try:
+ f = open(filename, 'r')
+ r = f.read()
+ f.close()
+ except:
+ return None
+ return r
+
+
+def reconfigure(ctx):
+ '''rerun configure if necessary'''
+ import Configure, samba_wildcard, Scripting
+ if not os.path.exists(".lock-wscript"):
+ raise Utils.WafError('configure has not been run')
+ bld = samba_wildcard.fake_build_environment()
+ Configure.autoconfig = True
+ Scripting.check_configured(bld)
+
+
+def map_shlib_extension(ctx, name, python=False):
+ '''map a filename with a shared library extension of .so to the real shlib name'''
+ if name is None:
+ return None
+ if name[-1:].isdigit():
+ # some libraries have specified versions in the wscript rule
+ return name
+ (root1, ext1) = os.path.splitext(name)
+ if python:
+ return ctx.env.pyext_PATTERN % root1
+ else:
+ (root2, ext2) = os.path.splitext(ctx.env.shlib_PATTERN)
+ return root1+ext2
+Build.BuildContext.map_shlib_extension = map_shlib_extension
+
+def apply_pattern(filename, pattern):
+ '''apply a filename pattern to a filename that may have a directory component'''
+ dirname = os.path.dirname(filename)
+ if not dirname:
+ return pattern % filename
+ basename = os.path.basename(filename)
+ return os.path.join(dirname, pattern % basename)
+
+def make_libname(ctx, name, nolibprefix=False, version=None, python=False):
+ """make a library filename
+ Options:
+ nolibprefix: don't include the lib prefix
+ version : add a version number
+ python : if we should use python module name conventions"""
+
+ if python:
+ libname = apply_pattern(name, ctx.env.pyext_PATTERN)
+ else:
+ libname = apply_pattern(name, ctx.env.shlib_PATTERN)
+ if nolibprefix and libname[0:3] == 'lib':
+ libname = libname[3:]
+ if version:
+ if version[0] == '.':
+ version = version[1:]
+ (root, ext) = os.path.splitext(libname)
+ if ext == ".dylib":
+ # special case - on darwin the version goes before the .dylib extension
+ libname = "%s.%s%s" % (root, version, ext)
+ else:
+ libname = "%s%s.%s" % (root, ext, version)
+ return libname
+Build.BuildContext.make_libname = make_libname
+
+
+def get_tgt_list(bld):
+ '''return a list of build objects for samba'''
+
+ targets = LOCAL_CACHE(bld, 'TARGET_TYPE')
+
+ # build a list of task generators we are interested in
+ tgt_list = []
+ for tgt in targets:
+ type = targets[tgt]
+ if not type in ['SUBSYSTEM', 'MODULE', 'BINARY', 'LIBRARY', 'ASN1', 'PYTHON']:
+ continue
+ t = bld.name_to_obj(tgt, bld.env)
+ if t is None:
+ Logs.error("Target %s of type %s has no task generator" % (tgt, type))
+ sys.exit(1)
+ tgt_list.append(t)
+ return tgt_list
+
+from Constants import WSCRIPT_FILE
+def PROCESS_SEPARATE_RULE(self, rule):
+ ''' cause waf to process additional script based on `rule'.
+ You should have a file named wscript_<stage>_rule in the current directory
+ where stage is either 'configure' or 'build'
+ '''
+ ctxclass = self.__class__.__name__
+ stage = ''
+ if ctxclass == 'ConfigurationContext':
+ stage = 'configure'
+ elif ctxclass == 'BuildContext':
+ stage = 'build'
+ file_path = os.path.join(self.curdir, WSCRIPT_FILE+'_'+stage+'_'+rule)
+ txt = load_file(file_path)
+ if txt:
+ dc = {'ctx': self}
+ if getattr(self.__class__, 'pre_recurse', None):
+ dc = self.pre_recurse(txt, file_path, self.curdir)
+ exec(compile(txt, file_path, 'exec'), dc)
+ if getattr(self.__class__, 'post_recurse', None):
+ dc = self.post_recurse(txt, file_path, self.curdir)
+
+Build.BuildContext.PROCESS_SEPARATE_RULE = PROCESS_SEPARATE_RULE
+ConfigurationContext.PROCESS_SEPARATE_RULE = PROCESS_SEPARATE_RULE
+
+def AD_DC_BUILD_IS_ENABLED(self):
+ if self.CONFIG_SET('AD_DC_BUILD_IS_ENABLED'):
+ return True
+ return False
+
+Build.BuildContext.AD_DC_BUILD_IS_ENABLED = AD_DC_BUILD_IS_ENABLED
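The helpers above are used by every other wafsamba module, and TO_LIST in
particular has behaviour worth pinning down. Expected results (a sketch;
the quoted case falls back to shlex.split):

    print(TO_LIST('a b c'))      # ['a', 'b', 'c']
    print(TO_LIST('"a b" c'))    # ['a b', 'c']
    print(TO_LIST(['a', 'b']))   # a new, independent ['a', 'b']
    print(TO_LIST(None))         # []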
diff --git a/buildtools/wafsamba/samba_version.py b/buildtools/wafsamba/samba_version.py
new file mode 100644
index 0000000..67ff232
--- /dev/null
+++ b/buildtools/wafsamba/samba_version.py
@@ -0,0 +1,321 @@
+import os
+import Utils
+import samba_utils
+import sys
+
+def bzr_version_summary(path):
+ try:
+ import bzrlib
+ except ImportError:
+ return ("BZR-UNKNOWN", {})
+
+ import bzrlib.ui
+ bzrlib.ui.ui_factory = bzrlib.ui.make_ui_for_terminal(
+ sys.stdin, sys.stdout, sys.stderr)
+ from bzrlib import branch, osutils, workingtree
+ from bzrlib.plugin import load_plugins
+ load_plugins()
+
+ b = branch.Branch.open(path)
+ (revno, revid) = b.last_revision_info()
+ rev = b.repository.get_revision(revid)
+
+ fields = {
+ "BZR_REVISION_ID": revid,
+ "BZR_REVNO": revno,
+ "COMMIT_DATE": osutils.format_date_with_offset_in_original_timezone(rev.timestamp,
+ rev.timezone or 0),
+ "COMMIT_TIME": int(rev.timestamp),
+ "BZR_BRANCH": rev.properties.get("branch-nick", ""),
+ }
+
+ # If possible, retrieve the git sha
+ try:
+ from bzrlib.plugins.git.object_store import get_object_store
+ except ImportError:
+ # No git plugin
+ ret = "BZR-%d" % revno
+ else:
+ store = get_object_store(b.repository)
+ store.lock_read()
+ try:
+ full_rev = store._lookup_revision_sha1(revid)
+ finally:
+ store.unlock()
+ fields["GIT_COMMIT_ABBREV"] = full_rev[:7]
+ fields["GIT_COMMIT_FULLREV"] = full_rev
+ ret = "GIT-" + fields["GIT_COMMIT_ABBREV"]
+
+ if workingtree.WorkingTree.open(path).has_changes():
+ fields["COMMIT_IS_CLEAN"] = 0
+ ret += "+"
+ else:
+ fields["COMMIT_IS_CLEAN"] = 1
+ return (ret, fields)
+
+
+def git_version_summary(path, env=None):
+ # Get version from GIT
+ if not 'GIT' in env and os.path.exists("/usr/bin/git"):
+ # this is useful when doing make dist without configuring
+ env.GIT = "/usr/bin/git"
+
+ if not 'GIT' in env:
+ return ("GIT-UNKNOWN", {})
+
+ environ = dict(os.environ)
+ environ["GIT_DIR"] = '%s/.git' % path
+ environ["GIT_WORK_TREE"] = path
+ git = Utils.cmd_output(env.GIT + ' show --pretty=format:"%h%n%ct%n%H%n%cd" --stat HEAD', silent=True, env=environ)
+
+ lines = git.splitlines()
+ if not lines or len(lines) < 4:
+ return ("GIT-UNKNOWN", {})
+
+ fields = {
+ "GIT_COMMIT_ABBREV": lines[0],
+ "GIT_COMMIT_FULLREV": lines[2],
+ "COMMIT_TIME": int(lines[1]),
+ "COMMIT_DATE": lines[3],
+ }
+
+ ret = "GIT-" + fields["GIT_COMMIT_ABBREV"]
+
+ if env.GIT_LOCAL_CHANGES:
+ clean = Utils.cmd_output('%s diff HEAD | wc -l' % env.GIT, silent=True).strip()
+ if clean == "0":
+ fields["COMMIT_IS_CLEAN"] = 1
+ else:
+ fields["COMMIT_IS_CLEAN"] = 0
+ ret += "+"
+
+ return (ret, fields)
+
+
+def distversion_version_summary(path):
+ #get version from .distversion file
+ f = open(path + '/.distversion', 'r')
+ suffix = None
+ fields = {}
+
+ for line in f:
+ line = line.strip()
+ if line == '':
+ continue
+ if line.startswith("#"):
+ continue
+ try:
+ split_line = line.split("=")
+ if split_line[1] != "":
+ key = split_line[0]
+ value = split_line[1]
+ if key == "SUFFIX":
+ suffix = value
+ continue
+ fields[key] = value
+ except:
+ print("Failed to parse line %s from .distversion file." % (line))
+ raise
+ f.close()
+
+ if "COMMIT_TIME" in fields:
+ fields["COMMIT_TIME"] = int(fields["COMMIT_TIME"])
+
+ if suffix is None:
+ return ("UNKNOWN", fields)
+
+ return (suffix, fields)
+
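+# A .distversion file consists of KEY=VALUE lines, for example
+# (illustrative values):
+#   SUFFIX=GIT-1f2e3d4
+#   COMMIT_TIME=1430437716
+#   COMMIT_IS_CLEAN=1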
+
+class SambaVersion(object):
+
+ def __init__(self, version_dict, path, env=None, is_install=True):
+ '''Determine the version number of samba
+
+See VERSION for the format. Entries in that file are
+also accepted as dictionary entries here.
+ '''
+
+ self.MAJOR=None
+ self.MINOR=None
+ self.RELEASE=None
+ self.REVISION=None
+ self.TP_RELEASE=None
+ self.ALPHA_RELEASE=None
+ self.BETA_RELEASE=None
+ self.PRE_RELEASE=None
+ self.RC_RELEASE=None
+ self.IS_SNAPSHOT=True
+ self.RELEASE_NICKNAME=None
+ self.VENDOR_SUFFIX=None
+ self.VENDOR_PATCH=None
+
+ for a, b in version_dict.iteritems():
+ if a.startswith("SAMBA_VERSION_"):
+ setattr(self, a[14:], b)
+ else:
+ setattr(self, a, b)
+
+ if self.IS_GIT_SNAPSHOT == "yes":
+ self.IS_SNAPSHOT=True
+ elif self.IS_GIT_SNAPSHOT == "no":
+ self.IS_SNAPSHOT=False
+ else:
+ raise Exception("Unknown value for IS_GIT_SNAPSHOT: %s" % self.IS_GIT_SNAPSHOT)
+
+ ##
+ ## start with "3.0.22"
+ ##
+ self.MAJOR=int(self.MAJOR)
+ self.MINOR=int(self.MINOR)
+ self.RELEASE=int(self.RELEASE)
+
+ SAMBA_VERSION_STRING = ("%u.%u.%u" % (self.MAJOR, self.MINOR, self.RELEASE))
+
+##
+## maybe add "3.0.22a" or "4.0.0tp11" or "4.0.0alpha1" or "4.0.0beta1" or "3.0.22pre1" or "3.0.22rc1"
+## We do not do pre or rc version on patch/letter releases
+##
+ if self.REVISION is not None:
+ SAMBA_VERSION_STRING += self.REVISION
+ if self.TP_RELEASE is not None:
+ self.TP_RELEASE = int(self.TP_RELEASE)
+ SAMBA_VERSION_STRING += "tp%u" % self.TP_RELEASE
+ if self.ALPHA_RELEASE is not None:
+ self.ALPHA_RELEASE = int(self.ALPHA_RELEASE)
+ SAMBA_VERSION_STRING += ("alpha%u" % self.ALPHA_RELEASE)
+ if self.BETA_RELEASE is not None:
+ self.BETA_RELEASE = int(self.BETA_RELEASE)
+ SAMBA_VERSION_STRING += ("beta%u" % self.BETA_RELEASE)
+ if self.PRE_RELEASE is not None:
+ self.PRE_RELEASE = int(self.PRE_RELEASE)
+ SAMBA_VERSION_STRING += ("pre%u" % self.PRE_RELEASE)
+ if self.RC_RELEASE is not None:
+ self.RC_RELEASE = int(self.RC_RELEASE)
+ SAMBA_VERSION_STRING += ("rc%u" % self.RC_RELEASE)
+
+ if self.IS_SNAPSHOT:
+ if not is_install:
+ suffix = "DEVELOPERBUILD"
+ self.vcs_fields = {}
+ elif os.path.exists(os.path.join(path, ".git")):
+ suffix, self.vcs_fields = git_version_summary(path, env=env)
+ elif os.path.exists(os.path.join(path, ".bzr")):
+ suffix, self.vcs_fields = bzr_version_summary(path)
+ elif os.path.exists(os.path.join(path, ".distversion")):
+ suffix, self.vcs_fields = distversion_version_summary(path)
+ else:
+ suffix = "UNKNOWN"
+ self.vcs_fields = {}
+ self.vcs_fields["SUFFIX"] = suffix
+ SAMBA_VERSION_STRING += "-" + suffix
+ else:
+ self.vcs_fields = {}
+
+ self.OFFICIAL_STRING = SAMBA_VERSION_STRING
+
+ if self.VENDOR_SUFFIX is not None:
+ SAMBA_VERSION_STRING += ("-" + self.VENDOR_SUFFIX)
+
+ if self.VENDOR_PATCH is not None:
+ SAMBA_VERSION_STRING += ("-" + self.VENDOR_PATCH)
+
+ self.STRING = SAMBA_VERSION_STRING
+
+ if self.RELEASE_NICKNAME is not None:
+ self.STRING_WITH_NICKNAME = "%s (%s)" % (self.STRING, self.RELEASE_NICKNAME)
+ else:
+ self.STRING_WITH_NICKNAME = self.STRING
+
+ def __str__(self):
+ string="/* Autogenerated by waf */\n"
+ string+="#define SAMBA_VERSION_MAJOR %u\n" % self.MAJOR
+ string+="#define SAMBA_VERSION_MINOR %u\n" % self.MINOR
+ string+="#define SAMBA_VERSION_RELEASE %u\n" % self.RELEASE
+ if self.REVISION is not None:
+ string+="#define SAMBA_VERSION_REVISION %u\n" % self.REVISION
+
+ if self.TP_RELEASE is not None:
+ string+="#define SAMBA_VERSION_TP_RELEASE %u\n" % self.TP_RELEASE
+
+ if self.ALPHA_RELEASE is not None:
+ string+="#define SAMBA_VERSION_ALPHA_RELEASE %u\n" % self.ALPHA_RELEASE
+
+ if self.BETA_RELEASE is not None:
+ string+="#define SAMBA_VERSION_BETA_RELEASE %u\n" % self.BETA_RELEASE
+
+ if self.PRE_RELEASE is not None:
+ string+="#define SAMBA_VERSION_PRE_RELEASE %u\n" % self.PRE_RELEASE
+
+ if self.RC_RELEASE is not None:
+ string+="#define SAMBA_VERSION_RC_RELEASE %u\n" % self.RC_RELEASE
+
+ for name in sorted(self.vcs_fields.keys()):
+ string+="#define SAMBA_VERSION_%s " % name
+ value = self.vcs_fields[name]
+ if isinstance(value, basestring):
+ string += "\"%s\"" % value
+ elif type(value) is int:
+ string += "%d" % value
+ else:
+ raise Exception("Unknown type for %s: %r" % (name, value))
+ string += "\n"
+
+ string+="#define SAMBA_VERSION_OFFICIAL_STRING \"" + self.OFFICIAL_STRING + "\"\n"
+
+ if self.VENDOR_SUFFIX is not None:
+ string+="#define SAMBA_VERSION_VENDOR_SUFFIX " + self.VENDOR_SUFFIX + "\n"
+ if self.VENDOR_PATCH is not None:
+ string+="#define SAMBA_VERSION_VENDOR_PATCH " + self.VENDOR_PATCH + "\n"
+
+ if self.RELEASE_NICKNAME is not None:
+ string+="#define SAMBA_VERSION_RELEASE_NICKNAME " + self.RELEASE_NICKNAME + "\n"
+
+ # We need to put this #ifdef into the headers so that vendors can override the version with a function
+ string+='''
+#ifdef SAMBA_VERSION_VENDOR_FUNCTION
+# define SAMBA_VERSION_STRING SAMBA_VERSION_VENDOR_FUNCTION
+#else /* SAMBA_VERSION_VENDOR_FUNCTION */
+# define SAMBA_VERSION_STRING "''' + self.STRING_WITH_NICKNAME + '''"
+#endif
+'''
+ string+="/* Version for mkrelease.sh: \nSAMBA_VERSION_STRING=" + self.STRING_WITH_NICKNAME + "\n */\n"
+
+ return string
+
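+# str(SambaVersion(...)) produces a C header fragment along these lines
+# (illustrative output):
+#   #define SAMBA_VERSION_MAJOR 4
+#   #define SAMBA_VERSION_MINOR 0
+#   #define SAMBA_VERSION_RELEASE 0
+#   #define SAMBA_VERSION_OFFICIAL_STRING "4.0.0"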
+
+def samba_version_file(version_file, path, env=None, is_install=True):
+ '''Parse the version information from a VERSION file'''
+
+ f = open(version_file, 'r')
+ version_dict = {}
+ for line in f:
+ line = line.strip()
+ if line == '':
+ continue
+ if line.startswith("#"):
+ continue
+ try:
+ split_line = line.split("=")
+ if split_line[1] != "":
+ value = split_line[1].strip('"')
+ version_dict[split_line[0]] = value
+ except:
+ print("Failed to parse line %s from %s" % (line, version_file))
+ raise
+
+ return SambaVersion(version_dict, path, env=env, is_install=is_install)
+
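+# A VERSION file contains KEY=VALUE lines such as (illustrative values;
+# the SAMBA_VERSION_ prefix is stripped by SambaVersion above):
+#   SAMBA_VERSION_MAJOR=4
+#   SAMBA_VERSION_MINOR=0
+#   SAMBA_VERSION_RELEASE=0
+#   SAMBA_VERSION_IS_GIT_SNAPSHOT=yes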
+
+
+def load_version(env=None, is_install=True):
+ '''load samba versions either from ./VERSION or git
+ return a version object for detailed breakdown'''
+ if not env:
+ env = samba_utils.LOAD_ENVIRONMENT()
+
+ version = samba_version_file("./VERSION", ".", env, is_install=is_install)
+ Utils.g_module.VERSION = version.STRING
+ return version
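+
+# Illustrative use (assumes a VERSION file in the current directory):
+#   v = load_version()
+#   print(v.STRING_WITH_NICKNAME)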
diff --git a/buildtools/wafsamba/samba_wildcard.py b/buildtools/wafsamba/samba_wildcard.py
new file mode 100644
index 0000000..84503b8
--- /dev/null
+++ b/buildtools/wafsamba/samba_wildcard.py
@@ -0,0 +1,153 @@
+# based on playground/evil in the waf svn tree
+
+import os, datetime
+import Scripting, Utils, Options, Logs, Environment, fnmatch
+from Constants import *
+from samba_utils import *
+
+def run_task(t, k):
+ '''run a single build task'''
+ ret = t.run()
+ if ret:
+ raise Utils.WafError("Failed to build %s: %u" % (k, ret))
+
+
+def run_named_build_task(cmd):
+ '''run a named build task, matching the cmd name using fnmatch
+ wildcards against inputs and outputs of all build tasks'''
+ bld = fake_build_environment(info=False)
+ found = False
+ cwd_node = bld.root.find_dir(os.getcwd())
+ top_node = bld.root.find_dir(bld.srcnode.abspath())
+
+ cmd = os.path.normpath(cmd)
+
+ # cope with builds of bin/*/*
+ if os.path.islink(cmd):
+ cmd = os_path_relpath(os.readlink(cmd), os.getcwd())
+
+ if cmd[0:12] == "bin/default/":
+ cmd = cmd[12:]
+
+ for g in bld.task_manager.groups:
+ for attr in ['outputs', 'inputs']:
+ for t in g.tasks:
+ s = getattr(t, attr, [])
+ for k in s:
+ relpath1 = k.relpath_gen(cwd_node)
+ relpath2 = k.relpath_gen(top_node)
+ if (fnmatch.fnmatch(relpath1, cmd) or
+ fnmatch.fnmatch(relpath2, cmd)):
+ t.position = [0,0]
+ print(t.display())
+ run_task(t, k)
+ found = True
+
+
+ if not found:
+ raise Utils.WafError("Unable to find build target matching %s" % cmd)
+
+
+def rewrite_compile_targets():
+ '''cope with the bin/ form of compile target'''
+ if not Options.options.compile_targets:
+ return
+
+ bld = fake_build_environment(info=False)
+ targets = LOCAL_CACHE(bld, 'TARGET_TYPE')
+ tlist = []
+
+ for t in Options.options.compile_targets.split(','):
+ if not os.path.islink(t):
+ tlist.append(t)
+ continue
+ link = os.readlink(t)
+ parts = link.split('/')
+ for name in [parts[-1], '/'.join(parts[-2:])]:
+ if name in targets:
+ tlist.append(name)
+ continue
+ Options.options.compile_targets = ",".join(tlist)
+
+
+
+def wildcard_main(missing_cmd_fn):
+ '''this replaces main from Scripting, allowing us to override the
+ behaviour for unknown commands
+
+ If an unknown command is found, then missing_cmd_fn() is called with
+ the name of the requested command
+ '''
+ Scripting.commands = Options.arg_line[:]
+
+ # rewrite the compile targets to cope with the bin/xx form
+ rewrite_compile_targets()
+
+ while Scripting.commands:
+ x = Scripting.commands.pop(0)
+
+ ini = datetime.datetime.now()
+ if x == 'configure':
+ fun = Scripting.configure
+ elif x == 'build':
+ fun = Scripting.build
+ else:
+ fun = getattr(Utils.g_module, x, None)
+
+ # this is the new addition on top of main from Scripting.py
+ if not fun:
+ missing_cmd_fn(x)
+ break
+
+ ctx = getattr(Utils.g_module, x + '_context', Utils.Context)()
+
+ if x in ['init', 'shutdown', 'dist', 'distclean', 'distcheck']:
+ try:
+ fun(ctx)
+ except TypeError:
+ fun()
+ else:
+ fun(ctx)
+
+ ela = ''
+ if not Options.options.progress_bar:
+ ela = ' (%s)' % Utils.get_elapsed_time(ini)
+
+ if x != 'init' and x != 'shutdown':
+ Logs.info('%r finished successfully%s' % (x, ela))
+
+ if not Scripting.commands and x != 'shutdown':
+ Scripting.commands.append('shutdown')
+
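+# Illustrative hook (hypothetical): treat any unknown command as a build
+# target to compile directly:
+#   def missing_cmd(name):
+#       run_named_build_task(name)
+#   wildcard_main(missing_cmd)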
+
+
+
+def fake_build_environment(info=True, flush=False):
+ """create all the tasks for the project, but do not run the build
+ return the build context in use"""
+ bld = getattr(Utils.g_module, 'build_context', Utils.Context)()
+ bld = Scripting.check_configured(bld)
+
+ Options.commands['install'] = False
+ Options.commands['uninstall'] = False
+ Options.is_install = False
+
+ bld.is_install = 0 # False
+
+ try:
+ proj = Environment.Environment(Options.lockfile)
+ except IOError:
+ raise Utils.WafError("Project not configured (run 'waf configure' first)")
+
+ bld.load_dirs(proj[SRCDIR], proj[BLDDIR])
+ bld.load_envs()
+
+ if info:
+ Logs.info("Waf: Entering directory `%s'" % bld.bldnode.abspath())
+ bld.add_subdirs([os.path.split(Utils.g_module.root_path)[0]])
+
+ bld.pre_build()
+ if flush:
+ bld.flush()
+ return bld
+
diff --git a/buildtools/wafsamba/stale_files.py b/buildtools/wafsamba/stale_files.py
new file mode 100644
index 0000000..2dd08e1
--- /dev/null
+++ b/buildtools/wafsamba/stale_files.py
@@ -0,0 +1,111 @@
+# encoding: utf-8
+# Thomas Nagy, 2006-2010 (ita)
+
+"""
+Add a pre-build hook to remove all build files
+which do not have a corresponding target
+
+This can be used for example to remove the targets
+that have changed name without performing
+a full 'waf clean'
+
+Note that this only works if there are no dynamically generated
+nodes/tasks; if there are, the method will have to be modified
+to exclude some folders, for example.
+"""
+
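+# Typical use from a wscript build() function (illustrative):
+#   bld.AUTOCLEANUP_STALE_FILES()
+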
+import Logs, Build, os, samba_utils, Options, Utils
+from Runner import Parallel
+
+old_refill_task_list = Parallel.refill_task_list
+def replace_refill_task_list(self):
+ '''replacement for refill_task_list() that deletes stale files'''
+
+ iit = old_refill_task_list(self)
+ bld = self.bld
+
+ if not getattr(bld, 'new_rules', False):
+ # we only need to check for stale files if the build rules changed
+ return iit
+
+ if Options.options.compile_targets:
+ # not safe when --target is used
+ return iit
+
+ # execute only once
+ if getattr(self, 'cleanup_done', False):
+ return iit
+ self.cleanup_done = True
+
+ def group_name(g):
+ tm = self.bld.task_manager
+ return [x for x in tm.groups_names if id(tm.groups_names[x]) == id(g)][0]
+
+ bin_base = bld.bldnode.abspath()
+ bin_base_len = len(bin_base)
+
+ # paranoia
+ if bin_base[-4:] != '/bin':
+ raise Utils.WafError("Invalid bin base: %s" % bin_base)
+
+ # obtain the expected list of files
+ expected = []
+ for i in range(len(bld.task_manager.groups)):
+ g = bld.task_manager.groups[i]
+ tasks = g.tasks_gen
+ for x in tasks:
+ try:
+ if getattr(x, 'target'):
+ tlist = samba_utils.TO_LIST(getattr(x, 'target'))
+ ttype = getattr(x, 'samba_type', None)
+ task_list = getattr(x, 'compiled_tasks', [])
+ if task_list:
+ # this gets all of the .o files, including the task
+ # ids, so foo.c maps to foo_3.o for idx=3
+ for tsk in task_list:
+ for output in tsk.outputs:
+ objpath = os.path.normpath(output.abspath(bld.env))
+ expected.append(objpath)
+ for t in tlist:
+ if ttype in ['LIBRARY','MODULE']:
+ t = samba_utils.apply_pattern(t, bld.env.shlib_PATTERN)
+ if ttype == 'PYTHON':
+ t = samba_utils.apply_pattern(t, bld.env.pyext_PATTERN)
+ p = os.path.join(x.path.abspath(bld.env), t)
+ p = os.path.normpath(p)
+ expected.append(p)
+ for n in x.allnodes:
+ p = n.abspath(bld.env)
+ if p[0:bin_base_len] == bin_base:
+ expected.append(p)
+ except:
+ pass
+
+ for root, dirs, files in os.walk(bin_base):
+ for f in files:
+ p = root + '/' + f
+ if os.path.islink(p):
+ link = os.readlink(p)
+ if link[0:bin_base_len] == bin_base:
+ p = link
+ if f in ['config.h']:
+ continue
+ (froot, fext) = os.path.splitext(f)
+ if fext not in [ '.c', '.h', '.so', '.o' ]:
+ continue
+ if f[-7:] == '.inst.h':
+ continue
+ if p.find("/.conf") != -1:
+ continue
+ if not p in expected and os.path.exists(p):
+ Logs.warn("Removing stale file: %s" % p)
+ os.unlink(p)
+ return iit
+
+
+def AUTOCLEANUP_STALE_FILES(bld):
+ """automatically clean up any files in bin that shouldn't be there"""
+ old_refill_task_list = Parallel.refill_task_list
+ Parallel.refill_task_list = replace_refill_task_list
+ Parallel.bld = bld
+Build.BuildContext.AUTOCLEANUP_STALE_FILES = AUTOCLEANUP_STALE_FILES
diff --git a/buildtools/wafsamba/symbols.py b/buildtools/wafsamba/symbols.py
new file mode 100644
index 0000000..daa18b9
--- /dev/null
+++ b/buildtools/wafsamba/symbols.py
@@ -0,0 +1,656 @@
+# a waf tool to extract symbols from object files or libraries
+# using nm, producing a set of exposed defined/undefined symbols
+
+import Utils, Build, subprocess, Logs, re
+from samba_wildcard import fake_build_environment
+from samba_utils import *
+
+# these are the data structures used in symbols.py:
+#
+# bld.env.symbol_map : dictionary mapping public symbol names to list of
+# subsystem names where that symbol exists
+#
+# t.in_library : list of libraries that t is in
+#
+# bld.env.public_symbols: set of public symbols for each subsystem
+# bld.env.used_symbols : set of used symbols for each subsystem
+#
+# bld.env.syslib_symbols: dictionary mapping system library name to set of symbols
+# for that library
+# bld.env.library_dict : dictionary mapping built library paths to subsystem names
+#
+# LOCAL_CACHE(bld, 'TARGET_TYPE') : dictionary mapping subsystem name to target type
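+#
+# For example (illustrative values, not real Samba targets):
+#   bld.env.symbol_map['talloc_free'] might be ['talloc'], meaning the
+#   public symbol talloc_free is defined by the 'talloc' subsystem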
+
+
+def symbols_extract(bld, objfiles, dynamic=False):
+ '''extract symbols from objfile, returning a dictionary containing
+ the set of undefined and public symbols for each file'''
+
+ ret = {}
+
+ # see if we can get some results from the nm cache
+ if not bld.env.nm_cache:
+ bld.env.nm_cache = {}
+
+ objfiles = set(objfiles).copy()
+
+ remaining = set()
+ for obj in objfiles:
+ if obj in bld.env.nm_cache:
+ ret[obj] = bld.env.nm_cache[obj].copy()
+ else:
+ remaining.add(obj)
+ objfiles = remaining
+
+ if len(objfiles) == 0:
+ return ret
+
+ cmd = ["nm"]
+ if dynamic:
+ # needed for some .so files
+ cmd.append("-D")
+ cmd.extend(list(objfiles))
+
+ nmpipe = subprocess.Popen(cmd, stdout=subprocess.PIPE).stdout
+ if len(objfiles) == 1:
+ filename = list(objfiles)[0]
+ ret[filename] = { "PUBLIC": set(), "UNDEFINED" : set()}
+
+ for line in nmpipe:
+ line = line.strip()
+ if line.endswith(':'):
+ filename = line[:-1]
+ ret[filename] = { "PUBLIC": set(), "UNDEFINED" : set() }
+ continue
+ cols = line.split(" ")
+ if cols == ['']:
+ continue
+ # see if the line starts with an address
+ if len(cols) == 3:
+ symbol_type = cols[1]
+ symbol = cols[2]
+ else:
+ symbol_type = cols[0]
+ symbol = cols[1]
+ if symbol_type in "BDGTRVWSi":
+ # it's a public symbol
+ ret[filename]["PUBLIC"].add(symbol)
+ elif symbol_type in "U":
+ ret[filename]["UNDEFINED"].add(symbol)
+
+ # add to the cache
+ for obj in objfiles:
+ if obj in ret:
+ bld.env.nm_cache[obj] = ret[obj].copy()
+ else:
+ bld.env.nm_cache[obj] = { "PUBLIC": set(), "UNDEFINED" : set() }
+
+ return ret
+
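+# The nm output parsed above looks like this (illustrative):
+#   foo.o:
+#   0000000000000000 T foo_init
+#                    U malloc
+# giving foo.o PUBLIC={'foo_init'} and UNDEFINED={'malloc'}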
+
+def real_name(name):
+ if name.find(".objlist") != -1:
+ name = name[:-8]
+ return name
+
+
+def find_ldd_path(bld, libname, binary):
+ '''find the path to the syslib we will link against'''
+ ret = None
+ if not bld.env.syslib_paths:
+ bld.env.syslib_paths = {}
+ if libname in bld.env.syslib_paths:
+ return bld.env.syslib_paths[libname]
+
+ lddpipe = subprocess.Popen(['ldd', binary], stdout=subprocess.PIPE).stdout
+ for line in lddpipe:
+ line = line.strip()
+ cols = line.split(" ")
+ if len(cols) < 3 or cols[1] != "=>":
+ continue
+ if cols[0].startswith("libc."):
+ # save this one too
+ bld.env.libc_path = cols[2]
+ if cols[0].startswith(libname):
+ ret = cols[2]
+ bld.env.syslib_paths[libname] = ret
+ return ret
+
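+# ldd output lines parsed above look like (illustrative):
+#   libtalloc.so.2 => /usr/lib/libtalloc.so.2 (0x00007f0000000000)
+# cols[0] is the soname, cols[1] is '=>' and cols[2] is the resolved path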
+
+# some regular expressions for parsing readelf output
+re_sharedlib = re.compile('Shared library: \[(.*)\]')
+re_rpath = re.compile('Library rpath: \[(.*)\]')
+
+def get_libs(bld, binname):
+ '''find the list of linked libraries for any binary or library.
+ binname is the path to the binary/library on disk.
+
+ We do this using readelf instead of ldd as we need to avoid recursing
+ into system libraries
+ '''
+
+ # see if we can get the result from the ldd cache
+ if not bld.env.lib_cache:
+ bld.env.lib_cache = {}
+ if binname in bld.env.lib_cache:
+ return bld.env.lib_cache[binname].copy()
+
+ rpath = []
+ libs = set()
+
+ elfpipe = subprocess.Popen(['readelf', '--dynamic', binname], stdout=subprocess.PIPE).stdout
+ for line in elfpipe:
+ m = re_sharedlib.search(line)
+ if m:
+ libs.add(m.group(1))
+ m = re_rpath.search(line)
+ if m:
+ rpath.extend(m.group(1).split(":"))
+
+ ret = set()
+ for lib in libs:
+ found = False
+ for r in rpath:
+ path = os.path.join(r, lib)
+ if os.path.exists(path):
+ ret.add(os.path.realpath(path))
+ found = True
+ break
+ if not found:
+ # we didn't find this lib using rpath. It is probably a system
+ # library, so to find the path to it we either need to use ldd
+ # or we need to start parsing /etc/ld.so.conf* ourselves. We'll
+ # use ldd for now, even though it is slow
+ path = find_ldd_path(bld, lib, binname)
+ if path:
+ ret.add(os.path.realpath(path))
+
+ bld.env.lib_cache[binname] = ret.copy()
+
+ return ret
+
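+# readelf --dynamic output lines matched by the regexps above look like
+# (illustrative):
+#   0x0000000000000001 (NEEDED)  Shared library: [libc.so.6]
+#   0x000000000000000f (RPATH)   Library rpath: [/usr/local/samba/lib]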
+
+def get_libs_recursive(bld, binname, seen):
+ '''find the recursive list of linked libraries for any binary or library
+ binname is the path to the binary/library on disk. seen is a set used
+ to prevent loops
+ '''
+ if binname in seen:
+ return set()
+ ret = get_libs(bld, binname)
+ seen.add(binname)
+ for lib in ret:
+ # we don't want to recurse into system libraries. If a system
+ # library that we use (eg. libcups) happens to use another library
+ # (such as libkrb5) which contains common symbols with our own
+ # libraries, then that is not an error
+ if lib in bld.env.library_dict:
+ ret = ret.union(get_libs_recursive(bld, lib, seen))
+ return ret
+
+
+
+def find_syslib_path(bld, libname, deps):
+ '''find the path to the syslib we will link against'''
+ # the strategy is to use the targets that depend on the library, and run ldd
+ # on it to find the real location of the library that is used
+
+ linkpath = deps[0].link_task.outputs[0].abspath(bld.env)
+
+ if libname == "python":
+ libname += bld.env.PYTHON_VERSION
+
+ return find_ldd_path(bld, "lib%s" % libname.lower(), linkpath)
+
+
+def build_symbol_sets(bld, tgt_list):
+ '''build the public_symbols and undefined_symbols attributes for each target'''
+
+ if bld.env.public_symbols:
+ return
+
+ objlist = [] # list of object files
+ objmap = {} # map from object filename to target (subsystem) name
+
+ for t in tgt_list:
+ t.public_symbols = set()
+ t.undefined_symbols = set()
+ t.used_symbols = set()
+ for tsk in getattr(t, 'compiled_tasks', []):
+ for output in tsk.outputs:
+ objpath = output.abspath(bld.env)
+ objlist.append(objpath)
+ objmap[objpath] = t
+
+ symbols = symbols_extract(bld, objlist)
+ for obj in objlist:
+ t = objmap[obj]
+ t.public_symbols = t.public_symbols.union(symbols[obj]["PUBLIC"])
+ t.undefined_symbols = t.undefined_symbols.union(symbols[obj]["UNDEFINED"])
+ t.used_symbols = t.used_symbols.union(symbols[obj]["UNDEFINED"])
+
+ t.undefined_symbols = t.undefined_symbols.difference(t.public_symbols)
+
+ # and the reverse map of public symbols to subsystem name
+ bld.env.symbol_map = {}
+
+ for t in tgt_list:
+ for s in t.public_symbols:
+ if not s in bld.env.symbol_map:
+ bld.env.symbol_map[s] = []
+ bld.env.symbol_map[s].append(real_name(t.sname))
+
+ targets = LOCAL_CACHE(bld, 'TARGET_TYPE')
+
+ bld.env.public_symbols = {}
+ for t in tgt_list:
+ name = real_name(t.sname)
+ if name in bld.env.public_symbols:
+ bld.env.public_symbols[name] = bld.env.public_symbols[name].union(t.public_symbols)
+ else:
+ bld.env.public_symbols[name] = t.public_symbols
+ if t.samba_type == 'LIBRARY':
+ for dep in t.add_objects:
+ t2 = bld.name_to_obj(dep, bld.env)
+ bld.ASSERT(t2 is not None, "Library '%s' has unknown dependency '%s'" % (name, dep))
+ bld.env.public_symbols[name] = bld.env.public_symbols[name].union(t2.public_symbols)
+
+ bld.env.used_symbols = {}
+ for t in tgt_list:
+ name = real_name(t.sname)
+ if name in bld.env.used_symbols:
+ bld.env.used_symbols[name] = bld.env.used_symbols[name].union(t.used_symbols)
+ else:
+ bld.env.used_symbols[name] = t.used_symbols
+ if t.samba_type == 'LIBRARY':
+ for dep in t.add_objects:
+ t2 = bld.name_to_obj(dep, bld.env)
+ bld.ASSERT(t2 is not None, "Library '%s' has unknown dependency '%s'" % (name, dep))
+ bld.env.used_symbols[name] = bld.env.used_symbols[name].union(t2.used_symbols)
+
+
+def build_library_dict(bld, tgt_list):
+ '''build the library_dict dictionary'''
+
+ if bld.env.library_dict:
+ return
+
+ bld.env.library_dict = {}
+
+ for t in tgt_list:
+ if t.samba_type in [ 'LIBRARY', 'PYTHON' ]:
+ linkpath = os.path.realpath(t.link_task.outputs[0].abspath(bld.env))
+ bld.env.library_dict[linkpath] = t.sname
+
+
+def build_syslib_sets(bld, tgt_list):
+ '''build the public_symbols for all syslibs'''
+
+ if bld.env.syslib_symbols:
+ return
+
+ # work out what syslibs we depend on, and what targets those are used in
+ syslibs = {}
+ objmap = {}
+ for t in tgt_list:
+ if getattr(t, 'uselib', []) and t.samba_type in [ 'LIBRARY', 'BINARY', 'PYTHON' ]:
+ for lib in t.uselib:
+ if lib in ['PYEMBED', 'PYEXT']:
+ lib = "python"
+ if not lib in syslibs:
+ syslibs[lib] = []
+ syslibs[lib].append(t)
+
+ # work out the paths to each syslib
+ syslib_paths = []
+ for lib in syslibs:
+ path = find_syslib_path(bld, lib, syslibs[lib])
+ if path is None:
+ Logs.warn("Unable to find syslib path for %s" % lib)
+ if path is not None:
+ syslib_paths.append(path)
+ objmap[path] = lib.lower()
+
+ # add in libc
+ syslib_paths.append(bld.env.libc_path)
+ objmap[bld.env.libc_path] = 'c'
+
+ symbols = symbols_extract(bld, syslib_paths, dynamic=True)
+
+ # keep a map of syslib names to public symbols
+ bld.env.syslib_symbols = {}
+ for lib in symbols:
+ bld.env.syslib_symbols[lib] = symbols[lib]["PUBLIC"]
+
+ # add to the map of symbols to dependencies
+ for lib in symbols:
+ for sym in symbols[lib]["PUBLIC"]:
+ if not sym in bld.env.symbol_map:
+ bld.env.symbol_map[sym] = []
+ bld.env.symbol_map[sym].append(objmap[lib])
+
+ # keep the libc symbols as well, as these are useful for some of the
+ # sanity checks
+ bld.env.libc_symbols = symbols[bld.env.libc_path]["PUBLIC"]
+
+ # add to the combined map of dependency name to public_symbols
+ for lib in bld.env.syslib_symbols:
+ bld.env.public_symbols[objmap[lib]] = bld.env.syslib_symbols[lib]
+
+
+def build_autodeps(bld, t):
+ '''build the set of dependencies for a target'''
+ deps = set()
+ name = real_name(t.sname)
+
+ targets = LOCAL_CACHE(bld, 'TARGET_TYPE')
+
+ for sym in t.undefined_symbols:
+ if sym in t.public_symbols:
+ continue
+ if sym in bld.env.symbol_map:
+ depname = bld.env.symbol_map[sym]
+ if depname == [ name ]:
+ # self dependencies aren't interesting
+ continue
+ if t.in_library == depname:
+ # no need to depend on the library we are part of
+ continue
+ if depname[0] in ['c', 'python']:
+ # these don't go into autodeps
+ continue
+ if targets[depname[0]] in [ 'SYSLIB' ]:
+ deps.add(depname[0])
+ continue
+ t2 = bld.name_to_obj(depname[0], bld.env)
+ if len(t2.in_library) != 1:
+ deps.add(depname[0])
+ continue
+ if t2.in_library == t.in_library:
+ # if we're part of the same library, we don't need to autodep
+ continue
+ deps.add(t2.in_library[0])
+ t.autodeps = deps
+
+
+def build_library_names(bld, tgt_list):
+ '''add an in_library attribute to all targets that are part of a library'''
+
+ if bld.env.done_build_library_names:
+ return
+
+ for t in tgt_list:
+ t.in_library = []
+
+ for t in tgt_list:
+ if t.samba_type in [ 'LIBRARY' ]:
+ for obj in t.samba_deps_extended:
+ t2 = bld.name_to_obj(obj, bld.env)
+ if t2 and t2.samba_type in [ 'SUBSYSTEM', 'ASN1' ]:
+ if not t.sname in t2.in_library:
+ t2.in_library.append(t.sname)
+ bld.env.done_build_library_names = True
+
+
+def check_library_deps(bld, t):
+ '''check that all the autodeps that have a mutual dependency with this
+ target are in the same library as the target'''
+
+ name = real_name(t.sname)
+
+ if len(t.in_library) > 1:
+ Logs.warn("WARNING: Target '%s' in multiple libraries: %s" % (t.sname, t.in_library))
+
+ for dep in t.autodeps:
+ t2 = bld.name_to_obj(dep, bld.env)
+ if t2 is None:
+ continue
+ for dep2 in t2.autodeps:
+ if dep2 == name and t.in_library != t2.in_library:
+ Logs.warn("WARNING: mutual dependency %s <=> %s" % (name, real_name(t2.sname)))
+ Logs.warn("Libraries should match. %s != %s" % (t.in_library, t2.in_library))
+ # raise Utils.WafError("illegal mutual dependency")
+
+
+def check_syslib_collisions(bld, tgt_list):
+ '''check if a target has any symbol collisions with a syslib
+
+ We do not want any code in Samba to use a symbol name from a
+ system library. The chance of that causing problems is just too
+ high. Note that libreplace uses a rep_XX approach of renaming
+ symbols via macros
+ '''
+
+ has_error = False
+ for t in tgt_list:
+ for lib in bld.env.syslib_symbols:
+ common = t.public_symbols.intersection(bld.env.syslib_symbols[lib])
+ if common:
+ Logs.error("ERROR: Target '%s' has symbols '%s' which is also in syslib '%s'" % (t.sname, common, lib))
+ has_error = True
+ if has_error:
+ raise Utils.WafError("symbols in common with system libraries")
+
+
+def check_dependencies(bld, t):
+ '''check for dependencies that should be changed'''
+
+ if bld.name_to_obj(t.sname + ".objlist", bld.env):
+ return
+
+ targets = LOCAL_CACHE(bld, 'TARGET_TYPE')
+
+ remaining = t.undefined_symbols.copy()
+ remaining = remaining.difference(t.public_symbols)
+
+ sname = real_name(t.sname)
+
+ deps = set(t.samba_deps)
+ for d in t.samba_deps:
+ if targets[d] in [ 'EMPTY', 'DISABLED', 'SYSLIB', 'GENERATOR' ]:
+ continue
+ bld.ASSERT(d in bld.env.public_symbols, "Failed to find symbol list for dependency '%s'" % d)
+ diff = remaining.intersection(bld.env.public_symbols[d])
+ if not diff and targets[sname] != 'LIBRARY':
+ Logs.info("Target '%s' has no dependency on %s" % (sname, d))
+ else:
+ remaining = remaining.difference(diff)
+
+ t.unsatisfied_symbols = set()
+ needed = {}
+ for sym in remaining:
+ if sym in bld.env.symbol_map:
+ dep = bld.env.symbol_map[sym]
+ if not dep[0] in needed:
+ needed[dep[0]] = set()
+ needed[dep[0]].add(sym)
+ else:
+ t.unsatisfied_symbols.add(sym)
+
+ for dep in needed:
+ Logs.info("Target '%s' should add dep '%s' for symbols %s" % (sname, dep, " ".join(needed[dep])))
+
+
+
+def check_syslib_dependencies(bld, t):
+ '''check for syslib dependencies'''
+
+ if bld.name_to_obj(t.sname + ".objlist", bld.env):
+ return
+
+ sname = real_name(t.sname)
+
+ remaining = set()
+
+ features = TO_LIST(t.features)
+ if 'pyembed' in features or 'pyext' in features:
+ if 'python' in bld.env.public_symbols:
+ t.unsatisfied_symbols = t.unsatisfied_symbols.difference(bld.env.public_symbols['python'])
+
+ needed = {}
+ for sym in t.unsatisfied_symbols:
+ if sym in bld.env.symbol_map:
+ dep = bld.env.symbol_map[sym][0]
+ if dep == 'c':
+ continue
+ if not dep in needed:
+ needed[dep] = set()
+ needed[dep].add(sym)
+ else:
+ remaining.add(sym)
+
+ for dep in needed:
+ Logs.info("Target '%s' should add syslib dep '%s' for symbols %s" % (sname, dep, " ".join(needed[dep])))
+
+ if remaining:
+ debug("deps: Target '%s' has unsatisfied symbols: %s" % (sname, " ".join(remaining)))
+
+
+
+def symbols_symbolcheck(task):
+ '''check the internal dependency lists'''
+ bld = task.env.bld
+ tgt_list = get_tgt_list(bld)
+
+ build_symbol_sets(bld, tgt_list)
+ build_library_names(bld, tgt_list)
+
+ for t in tgt_list:
+ t.autodeps = set()
+ if getattr(t, 'source', ''):
+ build_autodeps(bld, t)
+
+ for t in tgt_list:
+ check_dependencies(bld, t)
+
+ for t in tgt_list:
+ check_library_deps(bld, t)
+
+def symbols_syslibcheck(task):
+ '''check the syslib dependencies'''
+ bld = task.env.bld
+ tgt_list = get_tgt_list(bld)
+
+ build_syslib_sets(bld, tgt_list)
+ check_syslib_collisions(bld, tgt_list)
+
+ for t in tgt_list:
+ check_syslib_dependencies(bld, t)
+
+
+def symbols_whyneeded(task):
+ """check why 'target' needs to link to 'subsystem'"""
+ bld = task.env.bld
+ tgt_list = get_tgt_list(bld)
+
+ why = Options.options.WHYNEEDED.split(":")
+ if len(why) != 2:
+ raise Utils.WafError("usage: WHYNEEDED=TARGET:DEPENDENCY")
+ target = why[0]
+ subsystem = why[1]
+
+ build_symbol_sets(bld, tgt_list)
+ build_library_names(bld, tgt_list)
+ build_syslib_sets(bld, tgt_list)
+
+ Logs.info("Checking why %s needs to link to %s" % (target, subsystem))
+ if not target in bld.env.used_symbols:
+ Logs.warn("unable to find target '%s' in used_symbols dict" % target)
+ return
+ if not subsystem in bld.env.public_symbols:
+ Logs.warn("unable to find subsystem '%s' in public_symbols dict" % subsystem)
+ return
+ overlap = bld.env.used_symbols[target].intersection(bld.env.public_symbols[subsystem])
+ if not overlap:
+ Logs.info("target '%s' doesn't use any public symbols from '%s'" % (target, subsystem))
+ else:
+ Logs.info("target '%s' uses symbols %s from '%s'" % (target, overlap, subsystem))
+
+
+def report_duplicate(bld, binname, sym, libs, fail_on_error):
+ '''report duplicated symbols'''
+ if sym in ['_init', '_fini', '_edata', '_end', '__bss_start']:
+ return
+ libnames = []
+ for lib in libs:
+ if lib in bld.env.library_dict:
+ libnames.append(bld.env.library_dict[lib])
+ else:
+ libnames.append(lib)
+ if fail_on_error:
+ raise Utils.WafError("%s: Symbol %s linked in multiple libraries %s" % (binname, sym, libnames))
+ else:
+ print("%s: Symbol %s linked in multiple libraries %s" % (binname, sym, libnames))
+
+
+def symbols_dupcheck_binary(bld, binname, fail_on_error):
+ '''check for duplicated symbols in one binary'''
+
+ libs = get_libs_recursive(bld, binname, set())
+ symlist = symbols_extract(bld, libs, dynamic=True)
+
+ symmap = {}
+ for libpath in symlist:
+ for sym in symlist[libpath]['PUBLIC']:
+ if sym == '_GLOBAL_OFFSET_TABLE_':
+ continue
+ if not sym in symmap:
+ symmap[sym] = set()
+ symmap[sym].add(libpath)
+ for sym in symmap:
+ if len(symmap[sym]) > 1:
+ for libpath in symmap[sym]:
+ if libpath in bld.env.library_dict:
+ report_duplicate(bld, binname, sym, symmap[sym], fail_on_error)
+ break
+
+def symbols_dupcheck(task, fail_on_error=False):
+ '''check for symbols defined in two different subsystems'''
+ bld = task.env.bld
+ tgt_list = get_tgt_list(bld)
+
+ targets = LOCAL_CACHE(bld, 'TARGET_TYPE')
+
+ build_library_dict(bld, tgt_list)
+ for t in tgt_list:
+ if t.samba_type == 'BINARY':
+ binname = os_path_relpath(t.link_task.outputs[0].abspath(bld.env), os.getcwd())
+ symbols_dupcheck_binary(bld, binname, fail_on_error)
+
+
+def symbols_dupcheck_fatal(task):
+ '''check for symbols defined in two different subsystems (and fail if duplicates are found)'''
+ symbols_dupcheck(task, fail_on_error=True)
+
+
+def SYMBOL_CHECK(bld):
+ '''check our dependency lists'''
+ if Options.options.SYMBOLCHECK:
+ bld.SET_BUILD_GROUP('symbolcheck')
+ task = bld(rule=symbols_symbolcheck, always=True, name='symbol checking')
+ task.env.bld = bld
+
+ bld.SET_BUILD_GROUP('syslibcheck')
+ task = bld(rule=symbols_syslibcheck, always=True, name='syslib checking')
+ task.env.bld = bld
+
+ bld.SET_BUILD_GROUP('syslibcheck')
+ task = bld(rule=symbols_dupcheck, always=True, name='symbol duplicate checking')
+ task.env.bld = bld
+
+ if Options.options.WHYNEEDED:
+ bld.SET_BUILD_GROUP('syslibcheck')
+ task = bld(rule=symbols_whyneeded, always=True, name='check why a dependency is needed')
+ task.env.bld = bld
+
+
+Build.BuildContext.SYMBOL_CHECK = SYMBOL_CHECK
+
+def DUP_SYMBOL_CHECK(bld):
+ '''check for duplicate symbols'''
+ if Options.options.DUP_SYMBOLCHECK and bld.env.DEVELOPER:
+ bld.SET_BUILD_GROUP('syslibcheck')
+ task = bld(rule=symbols_dupcheck_fatal, always=True, name='symbol duplicate checking')
+ task.env.bld = bld
+
+Build.BuildContext.DUP_SYMBOL_CHECK = DUP_SYMBOL_CHECK
diff --git a/buildtools/wafsamba/test_duplicate_symbol.sh b/buildtools/wafsamba/test_duplicate_symbol.sh
new file mode 100755
index 0000000..89a4027
--- /dev/null
+++ b/buildtools/wafsamba/test_duplicate_symbol.sh
@@ -0,0 +1,12 @@
+#!/bin/sh
+# Run the waf duplicate symbol check, wrapped in subunit.
+
+. testprogs/blackbox/subunit.sh
+
+subunit_start_test duplicate_symbols
+
+if ./buildtools/bin/waf build --dup-symbol-check; then
+ subunit_pass_test duplicate_symbols
+else
+ echo | subunit_fail_test duplicate_symbols
+fi
diff --git a/buildtools/wafsamba/tests/__init__.py b/buildtools/wafsamba/tests/__init__.py
new file mode 100644
index 0000000..ae27418
--- /dev/null
+++ b/buildtools/wafsamba/tests/__init__.py
@@ -0,0 +1,35 @@
+# Copyright (C) 2012 Jelmer Vernooij <jelmer@samba.org>
+
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU Lesser General Public License as published by
+# the Free Software Foundation; either version 2.1 of the License, or
+# (at your option) any later version.
+
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU Lesser General Public License for more details.
+
+# You should have received a copy of the GNU Lesser General Public License
+# along with this program; if not, write to the Free Software
+# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+
+"""Tests for wafsamba."""
+
+from unittest import (
+ TestCase,
+ TestLoader,
+ )
+
+def test_suite():
+ names = [
+ 'abi',
+ 'bundled',
+ 'utils',
+ ]
+ module_names = ['wafsamba.tests.test_' + name for name in names]
+ loader = TestLoader()
+ result = loader.suiteClass()
+ suite = loader.loadTestsFromNames(module_names)
+ result.addTests(suite)
+ return result
diff --git a/buildtools/wafsamba/tests/test_abi.py b/buildtools/wafsamba/tests/test_abi.py
new file mode 100644
index 0000000..bba78c1
--- /dev/null
+++ b/buildtools/wafsamba/tests/test_abi.py
@@ -0,0 +1,120 @@
+# Copyright (C) 2012 Jelmer Vernooij <jelmer@samba.org>
+
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU Lesser General Public License as published by
+# the Free Software Foundation; either version 2.1 of the License, or
+# (at your option) any later version.
+
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU Lesser General Public License for more details.
+
+# You should have received a copy of the GNU Lesser General Public License
+# along with this program; if not, write to the Free Software
+# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+
+from wafsamba.tests import TestCase
+
+from wafsamba.samba_abi import (
+ abi_write_vscript,
+ normalise_signature,
+ )
+
+from cStringIO import StringIO
+
+
+class NormaliseSignatureTests(TestCase):
+
+ def test_function_simple(self):
+ self.assertEquals("int (const struct GUID *, const struct GUID *)",
+ normalise_signature("$2 = {int (const struct GUID *, const struct GUID *)} 0xe871 <GUID_compare>"))
+
+ def test_maps_Bool(self):
+ # Some types have different internal names
+ self.assertEquals("bool (const struct GUID *)",
+ normalise_signature("$1 = {_Bool (const struct GUID *)} 0xe75b <GUID_all_zero>"))
+
+ def test_function_keep(self):
+ self.assertEquals(
+ "enum ndr_err_code (struct ndr_push *, int, const union winreg_Data *)",
+ normalise_signature("enum ndr_err_code (struct ndr_push *, int, const union winreg_Data *)"))
+
+ def test_struct_constant(self):
+ self.assertEquals(
+ 'uuid = {time_low = 0, time_mid = 0, time_hi_and_version = 0, clock_seq = "\\000", node = "\\000\\000\\000\\000\\000"}, if_version = 0',
+ normalise_signature('$239 = {uuid = {time_low = 0, time_mid = 0, time_hi_and_version = 0, clock_seq = "\\000", node = "\\000\\000\\000\\000\\000"}, if_version = 0}'))
+
+ def test_incomplete_sequence(self):
+ # Newer versions of gdb insert these incomplete sequence elements
+ self.assertEquals(
+ 'uuid = {time_low = 2324192516, time_mid = 7403, time_hi_and_version = 4553, clock_seq = "\\237\\350", node = "\\b\\000+\\020H`"}, if_version = 2',
+ normalise_signature('$244 = {uuid = {time_low = 2324192516, time_mid = 7403, time_hi_and_version = 4553, clock_seq = "\\237", <incomplete sequence \\350>, node = "\\b\\000+\\020H`"}, if_version = 2}'))
+ self.assertEquals(
+ 'uuid = {time_low = 2324192516, time_mid = 7403, time_hi_and_version = 4553, clock_seq = "\\237\\350", node = "\\b\\000+\\020H`"}, if_version = 2',
+ normalise_signature('$244 = {uuid = {time_low = 2324192516, time_mid = 7403, time_hi_and_version = 4553, clock_seq = "\\237\\350", node = "\\b\\000+\\020H`"}, if_version = 2}'))
+
+
+class WriteVscriptTests(TestCase):
+
+ def test_one(self):
+ f = StringIO()
+ abi_write_vscript(f, "MYLIB", "1.0", [], {
+ "old": "1.0",
+ "new": "1.0"}, ["*"])
+ self.assertEquals(f.getvalue(), """\
+1.0 {
+\tglobal:
+\t\t*;
+};
+""")
+
+ def test_simple(self):
+ # No restrictions.
+ f = StringIO()
+ abi_write_vscript(f, "MYLIB", "1.0", ["0.1"], {
+ "old": "0.1",
+ "new": "1.0"}, ["*"])
+ self.assertEquals(f.getvalue(), """\
+MYLIB_0.1 {
+\tglobal:
+\t\told;
+};
+
+1.0 {
+\tglobal:
+\t\t*;
+};
+""")
+
+ def test_exclude(self):
+ f = StringIO()
+ abi_write_vscript(f, "MYLIB", "1.0", [], {
+ "exc_old": "0.1",
+ "old": "0.1",
+ "new": "1.0"}, ["!exc_*"])
+ self.assertEquals(f.getvalue(), """\
+1.0 {
+\tglobal:
+\t\t*;
+\tlocal:
+\t\texc_*;
+};
+""")
+
+ def test_excludes_and_includes(self):
+ f = StringIO()
+ abi_write_vscript(f, "MYLIB", "1.0", [], {
+ "pub_foo": "1.0",
+ "exc_bar": "1.0",
+ "other": "1.0"
+ }, ["pub_*", "!exc_*"])
+ self.assertEquals(f.getvalue(), """\
+1.0 {
+\tglobal:
+\t\tpub_*;
+\tlocal:
+\t\texc_*;
+\t\t*;
+};
+""")
diff --git a/buildtools/wafsamba/tests/test_bundled.py b/buildtools/wafsamba/tests/test_bundled.py
new file mode 100644
index 0000000..c5f0db6
--- /dev/null
+++ b/buildtools/wafsamba/tests/test_bundled.py
@@ -0,0 +1,27 @@
+# Copyright (C) 2012 Jelmer Vernooij <jelmer@samba.org>
+
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU Lesser General Public License as published by
+# the Free Software Foundation; either version 2.1 of the License, or
+# (at your option) any later version.
+
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU Lesser General Public License for more details.
+
+# You should have received a copy of the GNU Lesser General Public License
+# along with this program; if not, write to the Free Software
+# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+
+from wafsamba.tests import TestCase
+
+from wafsamba.samba_bundled import (
+ tuplize_version,
+ )
+
+
+class TuplizeVersionTests(TestCase):
+
+ def test_simple(self):
+ self.assertEquals((1, 2, 10), tuplize_version("1.2.10"))
diff --git a/buildtools/wafsamba/tests/test_utils.py b/buildtools/wafsamba/tests/test_utils.py
new file mode 100644
index 0000000..a9578e2
--- /dev/null
+++ b/buildtools/wafsamba/tests/test_utils.py
@@ -0,0 +1,76 @@
+# Copyright (C) 2012 Jelmer Vernooij <jelmer@samba.org>
+
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU Lesser General Public License as published by
+# the Free Software Foundation; either version 2.1 of the License, or
+# (at your option) any later version.
+
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU Lesser General Public License for more details.
+
+# You should have received a copy of the GNU Lesser General Public License
+# along with this program; if not, write to the Free Software
+# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+
+from wafsamba.tests import TestCase
+
+from wafsamba.samba_utils import (
+ TO_LIST,
+ dict_concat,
+ subst_vars_error,
+ unique_list,
+ )
+
+class ToListTests(TestCase):
+
+ def test_none(self):
+ self.assertEquals([], TO_LIST(None))
+
+ def test_already_list(self):
+ self.assertEquals(["foo", "bar", 1], TO_LIST(["foo", "bar", 1]))
+
+ def test_default_delimiter(self):
+ self.assertEquals(["foo", "bar"], TO_LIST("foo bar"))
+ self.assertEquals(["foo", "bar"], TO_LIST(" foo bar "))
+ self.assertEquals(["foo ", "bar"], TO_LIST(" \"foo \" bar "))
+
+ def test_delimiter(self):
+ self.assertEquals(["foo", "bar"], TO_LIST("foo,bar", ","))
+ self.assertEquals([" foo", "bar "], TO_LIST(" foo,bar ", ","))
+ self.assertEquals([" \" foo\"", " bar "], TO_LIST(" \" foo\", bar ", ","))
+
+
+class UniqueListTests(TestCase):
+
+ def test_unique_list(self):
+ self.assertEquals(["foo", "bar"], unique_list(["foo", "bar", "foo"]))
+
+
+class SubstVarsErrorTests(TestCase):
+
+ def test_valid(self):
+ self.assertEquals("", subst_vars_error("", {}))
+ self.assertEquals("FOO bar", subst_vars_error("${F} bar", {"F": "FOO"}))
+
+ def test_invalid(self):
+ self.assertRaises(KeyError, subst_vars_error, "${F}", {})
+
+
+class DictConcatTests(TestCase):
+
+ def test_empty(self):
+ ret = {}
+ dict_concat(ret, {})
+ self.assertEquals({}, ret)
+
+ def test_same(self):
+ ret = {"foo": "bar"}
+ dict_concat(ret, {"foo": "bla"})
+ self.assertEquals({"foo": "bar"}, ret)
+
+ def test_simple(self):
+ ret = {"foo": "bar"}
+ dict_concat(ret, {"blie": "bla"})
+ self.assertEquals({"foo": "bar", "blie": "bla"}, ret)
diff --git a/buildtools/wafsamba/tru64cc.py b/buildtools/wafsamba/tru64cc.py
new file mode 100644
index 0000000..e1bbb1d
--- /dev/null
+++ b/buildtools/wafsamba/tru64cc.py
@@ -0,0 +1,77 @@
+
+# compiler definition for tru64/OSF1 cc compiler
+# based on suncc.py from waf
+
+import os, optparse
+import Utils, Options, Configure
+import ccroot, ar
+from Configure import conftest
+
+from compiler_cc import c_compiler
+
+c_compiler['osf1V'] = ['gcc', 'tru64cc']
+
+@conftest
+def find_tru64cc(conf):
+ v = conf.env
+ cc = None
+ if v['CC']: cc = v['CC']
+ elif 'CC' in conf.environ: cc = conf.environ['CC']
+ if not cc: cc = conf.find_program('cc', var='CC')
+ if not cc: conf.fatal('tru64cc was not found')
+ cc = conf.cmd_to_list(cc)
+
+ try:
+ if not Utils.cmd_output(cc + ['-V']):
+ conf.fatal('tru64cc %r was not found' % cc)
+ except ValueError:
+ conf.fatal('tru64cc -V could not be executed')
+
+ v['CC'] = cc
+ v['CC_NAME'] = 'tru64'
+
+@conftest
+def tru64cc_common_flags(conf):
+ v = conf.env
+
+ v['CC_SRC_F'] = ''
+ v['CC_TGT_F'] = ['-c', '-o', '']
+ v['CPPPATH_ST'] = '-I%s' # template for adding include paths
+
+ # linker
+ if not v['LINK_CC']: v['LINK_CC'] = v['CC']
+ v['CCLNK_SRC_F'] = ''
+ v['CCLNK_TGT_F'] = ['-o', '']
+
+ v['LIB_ST'] = '-l%s' # template for adding libs
+ v['LIBPATH_ST'] = '-L%s' # template for adding libpaths
+ v['STATICLIB_ST'] = '-l%s'
+ v['STATICLIBPATH_ST'] = '-L%s'
+ v['CCDEFINES_ST'] = '-D%s'
+
+# v['SONAME_ST'] = '-Wl,-h -Wl,%s'
+# v['SHLIB_MARKER'] = '-Bdynamic'
+# v['STATICLIB_MARKER'] = '-Bstatic'
+
+ # program
+ v['program_PATTERN'] = '%s'
+
+ # shared library
+# v['shlib_CCFLAGS'] = ['-Kpic', '-DPIC']
+ v['shlib_LINKFLAGS'] = ['-shared']
+ v['shlib_PATTERN'] = 'lib%s.so'
+
+ # static lib
+# v['staticlib_LINKFLAGS'] = ['-Bstatic']
+# v['staticlib_PATTERN'] = 'lib%s.a'
+
+detect = '''
+find_tru64cc
+find_cpp
+find_ar
+tru64cc_common_flags
+cc_load_tools
+cc_add_flags
+link_add_flags
+'''
+
diff --git a/buildtools/wafsamba/wafsamba.py b/buildtools/wafsamba/wafsamba.py
new file mode 100644
index 0000000..d7e482c
--- /dev/null
+++ b/buildtools/wafsamba/wafsamba.py
@@ -0,0 +1,971 @@
+# a waf tool to add autoconf-like macros to the configure section,
+# and SAMBA_ macros for building libraries, binaries, etc.
+
+import Build, os, sys, Options, Task, Utils, cc, TaskGen, fnmatch, re, shutil, Logs, Constants
+from Configure import conf
+from Logs import debug
+from samba_utils import SUBST_VARS_RECURSIVE
+TaskGen.task_gen.apply_verif = Utils.nada
+
+# bring in the other samba modules
+from samba_optimisation import *
+from samba_utils import *
+from samba_version import *
+from samba_autoconf import *
+from samba_patterns import *
+from samba_pidl import *
+from samba_autoproto import *
+from samba_python import *
+from samba_perl import *
+from samba_deps import *
+from samba_bundled import *
+from samba_third_party import *
+import samba_install
+import samba_conftests
+import samba_abi
+import samba_headers
+import tru64cc
+import irixcc
+import hpuxcc
+import generic_cc
+import samba_dist
+import samba_wildcard
+import stale_files
+import symbols
+import pkgconfig
+import configure_file
+
+# some systems have broken threading in python
+if os.environ.get('WAF_NOTHREADS') == '1':
+ import nothreads
+
+LIB_PATH="shared"
+
+os.environ['PYTHONUNBUFFERED'] = '1'
+
+
+if Constants.HEXVERSION < 0x105019:
+ Logs.error('''
+Please use the version of waf that comes with Samba, not
+a system installed version. See http://wiki.samba.org/index.php/Waf
+for details.
+
+Alternatively, please run ./configure and make as usual. That will
+call the right version of waf.''')
+ sys.exit(1)
+
+
+@conf
+def SAMBA_BUILD_ENV(conf):
+ '''create the samba build environment'''
+ conf.env.BUILD_DIRECTORY = conf.blddir
+ mkdir_p(os.path.join(conf.blddir, LIB_PATH))
+ mkdir_p(os.path.join(conf.blddir, LIB_PATH, "private"))
+ mkdir_p(os.path.join(conf.blddir, "modules"))
+ mkdir_p(os.path.join(conf.blddir, 'python/samba/dcerpc'))
+ # this allows all of the bin/shared and bin/python targets
+ # to be expressed in terms of build directory paths
+ mkdir_p(os.path.join(conf.blddir, 'default'))
+ for (source, target) in [('shared', 'shared'), ('modules', 'modules'), ('python', 'python_modules')]:
+ link_target = os.path.join(conf.blddir, 'default/' + target)
+ if not os.path.lexists(link_target):
+ os.symlink('../' + source, link_target)
+
+ # get perl to put the blib files in the build directory
+ blib_bld = os.path.join(conf.blddir, 'default/pidl/blib')
+ blib_src = os.path.join(conf.srcdir, 'pidl/blib')
+ mkdir_p(blib_bld + '/man1')
+ mkdir_p(blib_bld + '/man3')
+ if os.path.islink(blib_src):
+ os.unlink(blib_src)
+ elif os.path.exists(blib_src):
+ shutil.rmtree(blib_src)
+
+
+def ADD_INIT_FUNCTION(bld, subsystem, target, init_function):
+ '''add an init_function to the list for a subsystem'''
+ if init_function is None:
+ return
+ bld.ASSERT(subsystem is not None, "You must specify a subsystem for init_function '%s'" % init_function)
+ cache = LOCAL_CACHE(bld, 'INIT_FUNCTIONS')
+ if not subsystem in cache:
+ cache[subsystem] = []
+ cache[subsystem].append( { 'TARGET':target, 'INIT_FUNCTION':init_function } )
+Build.BuildContext.ADD_INIT_FUNCTION = ADD_INIT_FUNCTION
+
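+# Illustrative call (hypothetical subsystem, target and init function names):
+#   bld.ADD_INIT_FUNCTION('service', 'service_smb', 'service_smb_init')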
+
+def generate_empty_file(task):
+ target_fname = task.outputs[0].bldpath(task.env)
+ target_file = open(target_fname, 'w')
+ target_file.close()
+ return 0
+
+#################################################################
+def SAMBA_LIBRARY(bld, libname, source,
+ deps='',
+ public_deps='',
+ includes='',
+ public_headers=None,
+ public_headers_install=True,
+ header_path=None,
+ pc_files=None,
+ vnum=None,
+ soname=None,
+ cflags='',
+ ldflags='',
+ external_library=False,
+ realname=None,
+ keep_underscore=False,
+ autoproto=None,
+ autoproto_extra_source='',
+ group='main',
+ depends_on='',
+ local_include=True,
+ global_include=True,
+ vars=None,
+ subdir=None,
+ install_path=None,
+ install=True,
+ pyembed=False,
+ pyext=False,
+ target_type='LIBRARY',
+ bundled_extension=False,
+ bundled_name=None,
+ link_name=None,
+ abi_directory=None,
+ abi_match=None,
+ hide_symbols=False,
+ manpages=None,
+ private_library=False,
+ grouping_library=False,
+ allow_undefined_symbols=False,
+ allow_warnings=False,
+ enabled=True):
+ '''define a Samba library'''
+
+ if LIB_MUST_BE_PRIVATE(bld, libname):
+ private_library=True
+
+ if not enabled:
+ SET_TARGET_TYPE(bld, libname, 'DISABLED')
+ return
+
+ source = bld.EXPAND_VARIABLES(source, vars=vars)
+ if subdir:
+ source = bld.SUBDIR(subdir, source)
+
+ # remember empty libraries, so we can strip the dependencies
+ if ((source == '') or (source == [])):
+ if deps == '' and public_deps == '':
+ SET_TARGET_TYPE(bld, libname, 'EMPTY')
+ return
+ empty_c = libname + '.empty.c'
+ bld.SAMBA_GENERATOR('%s_empty_c' % libname,
+ rule=generate_empty_file,
+ target=empty_c)
+ source=empty_c
+
+ if BUILTIN_LIBRARY(bld, libname):
+ obj_target = libname
+ else:
+ obj_target = libname + '.objlist'
+
+ if group == 'libraries':
+ subsystem_group = 'main'
+ else:
+ subsystem_group = group
+
+ # first create a target for building the object files for this library
+ # by separating in this way, we avoid recompiling the C files
+ # separately for the install library and the build library
+ bld.SAMBA_SUBSYSTEM(obj_target,
+ source = source,
+ deps = deps,
+ public_deps = public_deps,
+ includes = includes,
+ public_headers = public_headers,
+ public_headers_install = public_headers_install,
+ header_path = header_path,
+ cflags = cflags,
+ group = subsystem_group,
+ autoproto = autoproto,
+ autoproto_extra_source=autoproto_extra_source,
+ depends_on = depends_on,
+ hide_symbols = hide_symbols,
+ allow_warnings = allow_warnings,
+ pyembed = pyembed,
+ pyext = pyext,
+ local_include = local_include,
+ global_include = global_include)
+
+ if BUILTIN_LIBRARY(bld, libname):
+ return
+
+ if not SET_TARGET_TYPE(bld, libname, target_type):
+ return
+
+ # the library itself will depend on that object target
+ deps += ' ' + public_deps
+ deps = TO_LIST(deps)
+ deps.append(obj_target)
+
+ realname = bld.map_shlib_extension(realname, python=(target_type=='PYTHON'))
+ link_name = bld.map_shlib_extension(link_name, python=(target_type=='PYTHON'))
+
+ # we don't want any public libraries without version numbers
+ if (not private_library and target_type != 'PYTHON' and not realname):
+ if vnum is None and soname is None:
+ raise Utils.WafError("public library '%s' must have a vnum" %
+ libname)
+ if pc_files is None:
+ raise Utils.WafError("public library '%s' must have pkg-config file" %
+ libname)
+ if public_headers is None:
+ raise Utils.WafError("public library '%s' must have header files" %
+ libname)
+
+ if bundled_name is not None:
+ pass
+ elif target_type == 'PYTHON' or realname or not private_library:
+ if keep_underscore:
+ bundled_name = libname
+ else:
+ bundled_name = libname.replace('_', '-')
+ else:
+ assert (private_library == True and realname is None)
+ if abi_directory or vnum or soname:
+ bundled_extension=True
+ bundled_name = PRIVATE_NAME(bld, libname.replace('_', '-'),
+ bundled_extension, private_library)
+
+ ldflags = TO_LIST(ldflags)
+
+ features = 'cc cshlib symlink_lib install_lib'
+ if pyext:
+ features += ' pyext'
+ if pyembed:
+ features += ' pyembed'
+
+ if abi_directory:
+ features += ' abi_check'
+
+ vscript = None
+ if bld.env.HAVE_LD_VERSION_SCRIPT:
+ if private_library:
+ version = "%s_%s" % (Utils.g_module.APPNAME, Utils.g_module.VERSION)
+ elif vnum:
+ version = "%s_%s" % (libname, vnum)
+ else:
+ version = None
+ if version:
+ vscript = "%s.vscript" % libname
+ bld.ABI_VSCRIPT(libname, abi_directory, version, vscript,
+ abi_match)
+ fullname = apply_pattern(bundled_name, bld.env.shlib_PATTERN)
+ fullpath = bld.path.find_or_declare(fullname)
+ vscriptpath = bld.path.find_or_declare(vscript)
+ if not fullpath:
+ raise Utils.WafError("unable to find fullpath for %s" % fullname)
+ if not vscriptpath:
+ raise Utils.WafError("unable to find vscript path for %s" % vscript)
+ bld.add_manual_dependency(fullpath, vscriptpath)
+ if Options.is_install:
+ # also make the .inst file depend on the vscript
+ instname = apply_pattern(bundled_name + '.inst', bld.env.shlib_PATTERN)
+ bld.add_manual_dependency(bld.path.find_or_declare(instname), bld.path.find_or_declare(vscript))
+ vscript = os.path.join(bld.path.abspath(bld.env), vscript)
+
+ bld.SET_BUILD_GROUP(group)
+ t = bld(
+ features = features,
+ source = [],
+ target = bundled_name,
+ depends_on = depends_on,
+ samba_ldflags = ldflags,
+ samba_deps = deps,
+ samba_includes = includes,
+ version_script = vscript,
+ local_include = local_include,
+ global_include = global_include,
+ vnum = vnum,
+ soname = soname,
+ install_path = None,
+ samba_inst_path = install_path,
+ name = libname,
+ samba_realname = realname,
+ samba_install = install,
+ abi_directory = "%s/%s" % (bld.path.abspath(), abi_directory),
+ abi_match = abi_match,
+ private_library = private_library,
+ grouping_library=grouping_library,
+ allow_undefined_symbols=allow_undefined_symbols
+ )
+
+ if realname and not link_name:
+ link_name = 'shared/%s' % realname
+
+ if link_name:
+ t.link_name = link_name
+
+ if pc_files is not None and not private_library:
+ bld.PKG_CONFIG_FILES(pc_files, vnum=vnum)
+
+ if (manpages is not None and 'XSLTPROC_MANPAGES' in bld.env and
+ bld.env['XSLTPROC_MANPAGES']):
+ bld.MANPAGES(manpages, install)
+
+
+Build.BuildContext.SAMBA_LIBRARY = SAMBA_LIBRARY
+
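+# A minimal illustrative call (hypothetical names and files; public
+# libraries must supply vnum, pc_files and public_headers, per the
+# checks above):
+#   bld.SAMBA_LIBRARY('mylib',
+#                     source='mylib.c',
+#                     deps='talloc',
+#                     vnum='0.0.1',
+#                     pc_files='mylib.pc',
+#                     public_headers='mylib.h')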
+
+#################################################################
+def SAMBA_BINARY(bld, binname, source,
+ deps='',
+ includes='',
+ public_headers=None,
+ header_path=None,
+ modules=None,
+ ldflags=None,
+ cflags='',
+ autoproto=None,
+ use_hostcc=False,
+ use_global_deps=True,
+ compiler=None,
+ group='main',
+ manpages=None,
+ local_include=True,
+ global_include=True,
+ subsystem_name=None,
+ pyembed=False,
+ vars=None,
+ subdir=None,
+ install=True,
+ install_path=None,
+ enabled=True):
+ '''define a Samba binary'''
+
+ if not enabled:
+ SET_TARGET_TYPE(bld, binname, 'DISABLED')
+ return
+
+ if not SET_TARGET_TYPE(bld, binname, 'BINARY'):
+ return
+
+ features = 'cc cprogram symlink_bin install_bin'
+ if pyembed:
+ features += ' pyembed'
+
+ obj_target = binname + '.objlist'
+
+ source = bld.EXPAND_VARIABLES(source, vars=vars)
+ if subdir:
+ source = bld.SUBDIR(subdir, source)
+ source = unique_list(TO_LIST(source))
+
+ if group == 'binaries':
+ subsystem_group = 'main'
+ else:
+ subsystem_group = group
+
+ # only specify PIE flags for binaries
+ pie_cflags = cflags
+ pie_ldflags = TO_LIST(ldflags)
+ if bld.env['ENABLE_PIE'] is True:
+ pie_cflags += ' -fPIE'
+ pie_ldflags.extend(TO_LIST('-pie'))
+ if bld.env['ENABLE_RELRO'] is True:
+ pie_ldflags.extend(TO_LIST('-Wl,-z,relro,-z,now'))
+
+ # first create a target for building the object files for this binary
+ # by separating in this way, we avoid recompiling the C files
+ # separately for the install binary and the build binary
+ bld.SAMBA_SUBSYSTEM(obj_target,
+ source = source,
+ deps = deps,
+ includes = includes,
+ cflags = pie_cflags,
+ group = subsystem_group,
+ autoproto = autoproto,
+ subsystem_name = subsystem_name,
+ local_include = local_include,
+ global_include = global_include,
+ use_hostcc = use_hostcc,
+ pyext = pyembed,
+ use_global_deps= use_global_deps)
+
+ bld.SET_BUILD_GROUP(group)
+
+ # the binary itself will depend on that object target
+ deps = TO_LIST(deps)
+ deps.append(obj_target)
+
+ t = bld(
+ features = features,
+ source = [],
+ target = binname,
+ samba_deps = deps,
+ samba_includes = includes,
+ local_include = local_include,
+ global_include = global_include,
+ samba_modules = modules,
+ top = True,
+ samba_subsystem= subsystem_name,
+ install_path = None,
+ samba_inst_path= install_path,
+ samba_install = install,
+ samba_ldflags = pie_ldflags
+ )
+
+ if manpages is not None and 'XSLTPROC_MANPAGES' in bld.env and bld.env['XSLTPROC_MANPAGES']:
+ bld.MANPAGES(manpages, install)
+
+Build.BuildContext.SAMBA_BINARY = SAMBA_BINARY
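Since SAMBA_BINARY compiles all sources into a '<name>.objlist' subsystem and
links the binary against that, the build-tree and install-tree links share the
same objects. A hypothetical call (all names illustrative):

    # hypothetical wscript_build fragment: sources compile once into
    # examplecmd.objlist; the binary target only links the objects
    bld.SAMBA_BINARY('examplecmd',
                     source='main.c',
                     deps='examplelib',
                     manpages='examplecmd.1',  # built only if xsltproc exists
                     install_path='${BINDIR}')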
+
+
+#################################################################
+def SAMBA_MODULE(bld, modname, source,
+ deps='',
+ includes='',
+ subsystem=None,
+ init_function=None,
+ module_init_name='samba_init_module',
+ autoproto=None,
+ autoproto_extra_source='',
+ cflags='',
+ internal_module=True,
+ local_include=True,
+ global_include=True,
+ vars=None,
+ subdir=None,
+ enabled=True,
+ pyembed=False,
+ manpages=None,
+ allow_undefined_symbols=False,
+ allow_warnings=False
+ ):
+ '''define a Samba module.'''
+
+ bld.ASSERT(subsystem, "You must specify a subsystem for SAMBA_MODULE(%s)" % modname)
+
+ source = bld.EXPAND_VARIABLES(source, vars=vars)
+ if subdir:
+ source = bld.SUBDIR(subdir, source)
+
+ if internal_module or BUILTIN_LIBRARY(bld, modname):
+ # Do not create modules for disabled subsystems
+ if GET_TARGET_TYPE(bld, subsystem) == 'DISABLED':
+ return
+ bld.SAMBA_SUBSYSTEM(modname, source,
+ deps=deps,
+ includes=includes,
+ autoproto=autoproto,
+ autoproto_extra_source=autoproto_extra_source,
+ cflags=cflags,
+ local_include=local_include,
+ global_include=global_include,
+ allow_warnings=allow_warnings,
+ enabled=enabled)
+
+ bld.ADD_INIT_FUNCTION(subsystem, modname, init_function)
+ return
+
+ if not enabled:
+ SET_TARGET_TYPE(bld, modname, 'DISABLED')
+ return
+
+ # Do not create modules for disabled subsystems
+ if GET_TARGET_TYPE(bld, subsystem) == 'DISABLED':
+ return
+
+ realname = modname
+ deps += ' ' + subsystem
+ while realname.startswith("lib"+subsystem+"_"):
+ realname = realname[len("lib"+subsystem+"_"):]
+ while realname.startswith(subsystem+"_"):
+ realname = realname[len(subsystem+"_"):]
+
+ build_name = "%s_module_%s" % (subsystem, realname)
+
+ realname = bld.make_libname(realname)
+ while realname.startswith("lib"):
+ realname = realname[len("lib"):]
+
+ build_link_name = "modules/%s/%s" % (subsystem, realname)
+
+ if init_function:
+ cflags += " -D%s=%s" % (init_function, module_init_name)
+
+ bld.SAMBA_LIBRARY(modname,
+ source,
+ deps=deps,
+ includes=includes,
+ cflags=cflags,
+ realname = realname,
+ autoproto = autoproto,
+ local_include=local_include,
+ global_include=global_include,
+ vars=vars,
+ bundled_name=build_name,
+ link_name=build_link_name,
+ install_path="${MODULESDIR}/%s" % subsystem,
+ pyembed=pyembed,
+ manpages=manpages,
+ allow_undefined_symbols=allow_undefined_symbols,
+ allow_warnings=allow_warnings
+ )
+
+
+Build.BuildContext.SAMBA_MODULE = SAMBA_MODULE
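As the code above shows, an external module becomes a SAMBA_LIBRARY named
'<subsystem>_module_<name>', linked at modules/<subsystem>/ and installed
under ${MODULESDIR}/<subsystem>, with init_function #define'd to
module_init_name. A hypothetical call (all names illustrative):

    # hypothetical wscript_build fragment: a loadable subsystem module
    bld.SAMBA_MODULE('examplesubsys_mod_foo',
                     source='mod_foo.c',
                     subsystem='examplesubsys',
                     init_function='examplesubsys_mod_foo_init',
                     internal_module=False)  # build a real shared object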
+
+
+#################################################################
+def SAMBA_SUBSYSTEM(bld, modname, source,
+ deps='',
+ public_deps='',
+ includes='',
+ public_headers=None,
+ public_headers_install=True,
+ header_path=None,
+ cflags='',
+ cflags_end=None,
+ group='main',
+ init_function_sentinel=None,
+ autoproto=None,
+ autoproto_extra_source='',
+ depends_on='',
+ local_include=True,
+ local_include_first=True,
+ global_include=True,
+ subsystem_name=None,
+ enabled=True,
+ use_hostcc=False,
+ use_global_deps=True,
+ vars=None,
+ subdir=None,
+ hide_symbols=False,
+ allow_warnings=False,
+ pyext=False,
+ pyembed=False):
+ '''define a Samba subsystem'''
+
+ if not enabled:
+ SET_TARGET_TYPE(bld, modname, 'DISABLED')
+ return
+
+ # remember empty subsystems, so we can strip the dependencies
+ if ((source == '') or (source == [])):
+ if deps == '' and public_deps == '':
+ SET_TARGET_TYPE(bld, modname, 'EMPTY')
+ return
+ empty_c = modname + '.empty.c'
+ bld.SAMBA_GENERATOR('%s_empty_c' % modname,
+ rule=generate_empty_file,
+ target=empty_c)
+ source=empty_c
+
+ if not SET_TARGET_TYPE(bld, modname, 'SUBSYSTEM'):
+ return
+
+ source = bld.EXPAND_VARIABLES(source, vars=vars)
+ if subdir:
+ source = bld.SUBDIR(subdir, source)
+ source = unique_list(TO_LIST(source))
+
+ deps += ' ' + public_deps
+
+ bld.SET_BUILD_GROUP(group)
+
+ features = 'cc'
+ if pyext:
+ features += ' pyext'
+ if pyembed:
+ features += ' pyembed'
+
+ t = bld(
+ features = features,
+ source = source,
+ target = modname,
+ samba_cflags = CURRENT_CFLAGS(bld, modname, cflags,
+ allow_warnings=allow_warnings,
+ hide_symbols=hide_symbols),
+ depends_on = depends_on,
+ samba_deps = TO_LIST(deps),
+ samba_includes = includes,
+ local_include = local_include,
+ local_include_first = local_include_first,
+ global_include = global_include,
+ samba_subsystem= subsystem_name,
+ samba_use_hostcc = use_hostcc,
+ samba_use_global_deps = use_global_deps,
+ )
+
+ if cflags_end is not None:
+ t.samba_cflags.extend(TO_LIST(cflags_end))
+
+ if autoproto is not None:
+ bld.SAMBA_AUTOPROTO(autoproto, source + TO_LIST(autoproto_extra_source))
+ if public_headers is not None:
+ bld.PUBLIC_HEADERS(public_headers, header_path=header_path,
+ public_headers_install=public_headers_install)
+ return t
+
+
+Build.BuildContext.SAMBA_SUBSYSTEM = SAMBA_SUBSYSTEM
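A subsystem produces only an object list; other targets pull those objects in
by naming the subsystem in their deps. A hypothetical call (names
illustrative):

    # hypothetical wscript_build fragment: an object-only grouping target
    bld.SAMBA_SUBSYSTEM('examplesubsys',
                        source='a.c b.c',
                        deps='replace',
                        public_deps='talloc')  # folded into deps above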
+
+
+def SAMBA_GENERATOR(bld, name, rule, source='', target='',
+ group='generators', enabled=True,
+ public_headers=None,
+ public_headers_install=True,
+ header_path=None,
+ vars=None,
+ dep_vars=[],
+ always=False):
+ '''A generic source generator target'''
+
+ if not SET_TARGET_TYPE(bld, name, 'GENERATOR'):
+ return
+
+ if not enabled:
+ return
+
+    # copy, so we do not mutate the shared default list between calls
+    dep_vars = list(dep_vars)
+    dep_vars.append('ruledeps')
+    dep_vars.append('SAMBA_GENERATOR_VARS')
+
+ bld.SET_BUILD_GROUP(group)
+ t = bld(
+ rule=rule,
+ source=bld.EXPAND_VARIABLES(source, vars=vars),
+ target=target,
+ shell=isinstance(rule, str),
+ update_outputs=True,
+ before='cc',
+ ext_out='.c',
+ samba_type='GENERATOR',
+ dep_vars = dep_vars,
+ name=name)
+
+ if vars is None:
+ vars = {}
+ t.env.SAMBA_GENERATOR_VARS = vars
+
+ if always:
+ t.always = True
+
+ if public_headers is not None:
+ bld.PUBLIC_HEADERS(public_headers, header_path=header_path,
+ public_headers_install=public_headers_install)
+ return t
+Build.BuildContext.SAMBA_GENERATOR = SAMBA_GENERATOR
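A generator simply runs its rule, re-running whenever the rule string, the
sources, or any variable listed in dep_vars changes. A hypothetical call
(names and rule illustrative):

    # hypothetical wscript_build fragment: rebuild version.h whenever the
    # tracked VERSION variable (or the rule itself) changes
    bld.SAMBA_GENERATOR('version_header',
                        source='version.h.in',
                        target='version.h',
                        rule='sed s/@VERSION@/${VERSION}/ ${SRC} > ${TGT}',
                        dep_vars=['VERSION'])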
+
+
+
+@runonce
+def SETUP_BUILD_GROUPS(bld):
+ '''setup build groups used to ensure that the different build
+ phases happen consecutively'''
+ bld.p_ln = bld.srcnode # we do want to see all targets!
+ bld.env['USING_BUILD_GROUPS'] = True
+ bld.add_group('setup')
+ bld.add_group('build_compiler_source')
+ bld.add_group('vscripts')
+ bld.add_group('base_libraries')
+ bld.add_group('generators')
+ bld.add_group('compiler_prototypes')
+ bld.add_group('compiler_libraries')
+ bld.add_group('build_compilers')
+ bld.add_group('build_source')
+ bld.add_group('prototypes')
+ bld.add_group('headers')
+ bld.add_group('main')
+ bld.add_group('symbolcheck')
+ bld.add_group('syslibcheck')
+ bld.add_group('final')
+Build.BuildContext.SETUP_BUILD_GROUPS = SETUP_BUILD_GROUPS
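Because the groups execute in the order they were added, a target can be
pinned to an earlier phase simply by passing group=. A hypothetical sketch:

    # hypothetical wscript_build fragment: the generated header is built in
    # the 'generators' group, so it exists before 'main' starts compiling
    bld.SAMBA_GENERATOR('gen_hdr', source='gen.h.in', target='gen.h',
                        rule='cp ${SRC} ${TGT}', group='generators')
    bld.SAMBA_BINARY('tool', source='tool.c', group='main')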
+
+
+def SET_BUILD_GROUP(bld, group):
+ '''set the current build group'''
+ if not 'USING_BUILD_GROUPS' in bld.env:
+ return
+ bld.set_group(group)
+Build.BuildContext.SET_BUILD_GROUP = SET_BUILD_GROUP
+
+
+
+@conf
+def ENABLE_TIMESTAMP_DEPENDENCIES(conf):
+ """use timestamps instead of file contents for deps
+ this currently doesn't work"""
+ def h_file(filename):
+ import stat
+ st = os.stat(filename)
+ if stat.S_ISDIR(st[stat.ST_MODE]): raise IOError('not a file')
+ m = Utils.md5()
+ m.update(str(st.st_mtime))
+ m.update(str(st.st_size))
+ m.update(filename)
+ return m.digest()
+ Utils.h_file = h_file
+
+
+def SAMBA_SCRIPT(bld, name, pattern, installdir, installname=None):
+ '''used to copy scripts from the source tree into the build directory
+ for use by selftest'''
+
+ source = bld.path.ant_glob(pattern)
+
+ bld.SET_BUILD_GROUP('build_source')
+ for s in TO_LIST(source):
+ iname = s
+ if installname is not None:
+ iname = installname
+ target = os.path.join(installdir, iname)
+ tgtdir = os.path.dirname(os.path.join(bld.srcnode.abspath(bld.env), '..', target))
+ mkdir_p(tgtdir)
+ link_src = os.path.normpath(os.path.join(bld.curdir, s))
+ link_dst = os.path.join(tgtdir, os.path.basename(iname))
+ if os.path.islink(link_dst) and os.readlink(link_dst) == link_src:
+ continue
+ if os.path.exists(link_dst):
+ os.unlink(link_dst)
+ Logs.info("symlink: %s -> %s/%s" % (s, installdir, iname))
+ os.symlink(link_src, link_dst)
+Build.BuildContext.SAMBA_SCRIPT = SAMBA_SCRIPT
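A hypothetical use, symlinking shell helpers into the build tree so selftest
can execute them in place (pattern and directory are illustrative):

    # hypothetical wscript_build fragment
    bld.SAMBA_SCRIPT('testscripts',
                     pattern='tests/*.sh',
                     installdir='testscripts')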
+
+
+def copy_and_fix_python_path(task):
+ pattern='sys.path.insert(0, "bin/python")'
+ if task.env["PYTHONARCHDIR"] in sys.path and task.env["PYTHONDIR"] in sys.path:
+ replacement = ""
+ elif task.env["PYTHONARCHDIR"] == task.env["PYTHONDIR"]:
+ replacement="""sys.path.insert(0, "%s")""" % task.env["PYTHONDIR"]
+ else:
+ replacement="""sys.path.insert(0, "%s")
+sys.path.insert(1, "%s")""" % (task.env["PYTHONARCHDIR"], task.env["PYTHONDIR"])
+
+ if task.env["PYTHON"][0] == "/":
+ replacement_shebang = "#!%s\n" % task.env["PYTHON"]
+ else:
+ replacement_shebang = "#!/usr/bin/env %s\n" % task.env["PYTHON"]
+
+ installed_location=task.outputs[0].bldpath(task.env)
+ source_file = open(task.inputs[0].srcpath(task.env))
+ installed_file = open(installed_location, 'w')
+ lineno = 0
+ for line in source_file:
+ newline = line
+ if (lineno == 0 and task.env["PYTHON_SPECIFIED"] is True and
+ line[:2] == "#!"):
+ newline = replacement_shebang
+ elif pattern in line:
+ newline = line.replace(pattern, replacement)
+ installed_file.write(newline)
+ lineno = lineno + 1
+ installed_file.close()
+ os.chmod(installed_location, 0755)
+ return 0
+
+def copy_and_fix_perl_path(task):
+ pattern='use lib "$RealBin/lib";'
+
+ replacement = ""
+ if not task.env["PERL_LIB_INSTALL_DIR"] in task.env["PERL_INC"]:
+ replacement = 'use lib "%s";' % task.env["PERL_LIB_INSTALL_DIR"]
+
+ if task.env["PERL"][0] == "/":
+ replacement_shebang = "#!%s\n" % task.env["PERL"]
+ else:
+ replacement_shebang = "#!/usr/bin/env %s\n" % task.env["PERL"]
+
+ installed_location=task.outputs[0].bldpath(task.env)
+ source_file = open(task.inputs[0].srcpath(task.env))
+ installed_file = open(installed_location, 'w')
+ lineno = 0
+ for line in source_file:
+ newline = line
+ if lineno == 0 and task.env["PERL_SPECIFIED"] == True and line[:2] == "#!":
+ newline = replacement_shebang
+ elif pattern in line:
+ newline = line.replace(pattern, replacement)
+ installed_file.write(newline)
+ lineno = lineno + 1
+ installed_file.close()
+ os.chmod(installed_location, 0755)
+ return 0
+
+
+def install_file(bld, destdir, file, chmod=MODE_644, flat=False,
+ python_fixup=False, perl_fixup=False,
+ destname=None, base_name=None):
+ '''install a file'''
+ destdir = bld.EXPAND_VARIABLES(destdir)
+ if not destname:
+ destname = file
+ if flat:
+ destname = os.path.basename(destname)
+ dest = os.path.join(destdir, destname)
+ if python_fixup:
+ # fix the path python will use to find Samba modules
+ inst_file = file + '.inst'
+ bld.SAMBA_GENERATOR('python_%s' % destname,
+ rule=copy_and_fix_python_path,
+ dep_vars=["PYTHON","PYTHON_SPECIFIED","PYTHONDIR","PYTHONARCHDIR"],
+ source=file,
+ target=inst_file)
+ file = inst_file
+ if perl_fixup:
+ # fix the path perl will use to find Samba modules
+ inst_file = file + '.inst'
+ bld.SAMBA_GENERATOR('perl_%s' % destname,
+ rule=copy_and_fix_perl_path,
+ dep_vars=["PERL","PERL_SPECIFIED","PERL_LIB_INSTALL_DIR"],
+ source=file,
+ target=inst_file)
+ file = inst_file
+ if base_name:
+ file = os.path.join(base_name, file)
+ bld.install_as(dest, file, chmod=chmod)
+
+
+def INSTALL_FILES(bld, destdir, files, chmod=MODE_644, flat=False,
+ python_fixup=False, perl_fixup=False,
+ destname=None, base_name=None):
+ '''install a set of files'''
+ for f in TO_LIST(files):
+ install_file(bld, destdir, f, chmod=chmod, flat=flat,
+ python_fixup=python_fixup, perl_fixup=perl_fixup,
+ destname=destname, base_name=base_name)
+Build.BuildContext.INSTALL_FILES = INSTALL_FILES
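When python_fixup (or perl_fixup) is set, install_file() interposes a
generated '<file>.inst' copy whose shebang and sys.path stanza have been
rewritten for the install layout by the copy_and_fix_* rules above. A
hypothetical call (file name illustrative; MODE_755 comes from samba_utils):

    # hypothetical wscript_build fragment: install a python script via the
    # generated example-tool.inst copy
    bld.INSTALL_FILES('${BINDIR}', 'example-tool', chmod=MODE_755,
                      python_fixup=True, flat=True)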
+
+
+def INSTALL_WILDCARD(bld, destdir, pattern, chmod=MODE_644, flat=False,
+ python_fixup=False, exclude=None, trim_path=None):
+ '''install a set of files matching a wildcard pattern'''
+ files=TO_LIST(bld.path.ant_glob(pattern))
+ if trim_path:
+ files2 = []
+ for f in files:
+ files2.append(os_path_relpath(f, trim_path))
+ files = files2
+
+ if exclude:
+ for f in files[:]:
+ if fnmatch.fnmatch(f, exclude):
+ files.remove(f)
+ INSTALL_FILES(bld, destdir, files, chmod=chmod, flat=flat,
+ python_fixup=python_fixup, base_name=trim_path)
+Build.BuildContext.INSTALL_WILDCARD = INSTALL_WILDCARD
+
+
+def INSTALL_DIRS(bld, destdir, dirs):
+ '''install a set of directories'''
+ destdir = bld.EXPAND_VARIABLES(destdir)
+ dirs = bld.EXPAND_VARIABLES(dirs)
+ for d in TO_LIST(dirs):
+ bld.install_dir(os.path.join(destdir, d))
+Build.BuildContext.INSTALL_DIRS = INSTALL_DIRS
+
+
+def MANPAGES(bld, manpages, install):
+ '''build and install manual pages'''
+ bld.env.MAN_XSL = 'http://docbook.sourceforge.net/release/xsl/current/manpages/docbook.xsl'
+ for m in manpages.split():
+ source = m + '.xml'
+ bld.SAMBA_GENERATOR(m,
+ source=source,
+ target=m,
+ group='final',
+ rule='${XSLTPROC} --xinclude -o ${TGT} --nonet ${MAN_XSL} ${SRC}'
+ )
+ if install:
+ bld.INSTALL_FILES('${MANDIR}/man%s' % m[-1], m, flat=True)
+Build.BuildContext.MANPAGES = MANPAGES
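The manpage section is taken from the last character of the page name, so in
the hypothetical call below 'example.1' is generated from example.1.xml and
installed into ${MANDIR}/man1:

    # hypothetical wscript_build fragment: build and install two manpages
    bld.MANPAGES('example.1 example.5', install=True)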
+
+def SAMBAMANPAGES(bld, manpages, extra_source=None):
+ '''build and install manual pages'''
+ bld.env.SAMBA_EXPAND_XSL = bld.srcnode.abspath() + '/docs-xml/xslt/expand-sambadoc.xsl'
+ bld.env.SAMBA_MAN_XSL = bld.srcnode.abspath() + '/docs-xml/xslt/man.xsl'
+ bld.env.SAMBA_CATALOG = bld.srcnode.abspath() + '/bin/default/docs-xml/build/catalog.xml'
+ bld.env.SAMBA_CATALOGS = 'file:///etc/xml/catalog file:///usr/local/share/xml/catalog file://' + bld.env.SAMBA_CATALOG
+
+ for m in manpages.split():
+ source = m + '.xml'
+ if extra_source is not None:
+ source = [source, extra_source]
+ bld.SAMBA_GENERATOR(m,
+ source=source,
+ target=m,
+ group='final',
+ dep_vars=['SAMBA_MAN_XSL', 'SAMBA_EXPAND_XSL', 'SAMBA_CATALOG'],
+ rule='''XML_CATALOG_FILES="${SAMBA_CATALOGS}"
+ export XML_CATALOG_FILES
+ ${XSLTPROC} --xinclude --stringparam noreference 0 -o ${TGT}.xml --nonet ${SAMBA_EXPAND_XSL} ${SRC[0].abspath(env)}
+ ${XSLTPROC} --nonet -o ${TGT} ${SAMBA_MAN_XSL} ${TGT}.xml'''
+ )
+ bld.INSTALL_FILES('${MANDIR}/man%s' % m[-1], m, flat=True)
+Build.BuildContext.SAMBAMANPAGES = SAMBAMANPAGES
+
+#############################################################
+# give a nicer display when building different types of files
+def progress_display(self, msg, fname):
+ col1 = Logs.colors(self.color)
+ col2 = Logs.colors.NORMAL
+ total = self.position[1]
+ n = len(str(total))
+ fs = '[%%%dd/%%%dd] %s %%s%%s%%s\n' % (n, n, msg)
+ return fs % (self.position[0], self.position[1], col1, fname, col2)
+
+def link_display(self):
+ if Options.options.progress_bar != 0:
+ return Task.Task.old_display(self)
+ fname = self.outputs[0].bldpath(self.env)
+ return progress_display(self, 'Linking', fname)
+Task.TaskBase.classes['cc_link'].display = link_display
+
+def samba_display(self):
+ if Options.options.progress_bar != 0:
+ return Task.Task.old_display(self)
+
+ targets = LOCAL_CACHE(self, 'TARGET_TYPE')
+ if self.name in targets:
+ target_type = targets[self.name]
+ type_map = { 'GENERATOR' : 'Generating',
+ 'PROTOTYPE' : 'Generating'
+ }
+ if target_type in type_map:
+ return progress_display(self, type_map[target_type], self.name)
+
+ if len(self.inputs) == 0:
+ return Task.Task.old_display(self)
+
+ fname = self.inputs[0].bldpath(self.env)
+ if fname[0:3] == '../':
+ fname = fname[3:]
+ ext_loc = fname.rfind('.')
+ if ext_loc == -1:
+ return Task.Task.old_display(self)
+ ext = fname[ext_loc:]
+
+ ext_map = { '.idl' : 'Compiling IDL',
+ '.et' : 'Compiling ERRTABLE',
+ '.asn1': 'Compiling ASN1',
+ '.c' : 'Compiling' }
+ if ext in ext_map:
+ return progress_display(self, ext_map[ext], fname)
+ return Task.Task.old_display(self)
+
+Task.TaskBase.classes['Task'].old_display = Task.TaskBase.classes['Task'].display
+Task.TaskBase.classes['Task'].display = samba_display
+
+
+@after('apply_link')
+@feature('cshlib')
+def apply_bundle_remove_dynamiclib_patch(self):
+ if self.env['MACBUNDLE'] or getattr(self,'mac_bundle',False):
+ if not getattr(self,'vnum',None):
+ try:
+ self.env['LINKFLAGS'].remove('-dynamiclib')
+ self.env['LINKFLAGS'].remove('-single_module')
+ except ValueError:
+ pass
diff --git a/buildtools/wafsamba/wscript b/buildtools/wafsamba/wscript
new file mode 100755
index 0000000..694147e
--- /dev/null
+++ b/buildtools/wafsamba/wscript
@@ -0,0 +1,540 @@
+#!/usr/bin/env python
+
+# this is a base set of waf rules that everything else pulls in first
+
+import sys, wafsamba, Configure, Logs
+import Options, os, preproc
+from samba_utils import *
+from optparse import SUPPRESS_HELP
+
+# this forces configure to be re-run if any of the configure
+# sections of the build scripts change. We have to check
+# for this in sys.argv as options have not yet been parsed when
+# we need to set this. This is off by default until some issues
+# are resolved related to WAFCACHE. It will need a lot of testing
+# before it is enabled by default.
+if '--enable-auto-reconfigure' in sys.argv:
+ Configure.autoconfig = True
+
+def set_options(opt):
+ opt.tool_options('compiler_cc')
+
+ opt.tool_options('gnu_dirs')
+
+ gr = opt.option_group('library handling options')
+
+ gr.add_option('--bundled-libraries',
+ help=("comma separated list of bundled libraries. May include !LIBNAME to disable bundling a library. Can be 'NONE' or 'ALL' [auto]"),
+ action="store", dest='BUNDLED_LIBS', default='')
+
+ gr.add_option('--private-libraries',
+ help=("comma separated list of normally public libraries to build instead as private libraries. May include !LIBNAME to disable making a library private. Can be 'NONE' or 'ALL' [auto]"),
+ action="store", dest='PRIVATE_LIBS', default='')
+
+ extension_default = Options.options['PRIVATE_EXTENSION_DEFAULT']
+ gr.add_option('--private-library-extension',
+ help=("name extension for private libraries [%s]" % extension_default),
+ action="store", dest='PRIVATE_EXTENSION', default=extension_default)
+
+ extension_exception = Options.options['PRIVATE_EXTENSION_EXCEPTION']
+ gr.add_option('--private-extension-exception',
+ help=("comma separated list of libraries to not apply extension to [%s]" % extension_exception),
+ action="store", dest='PRIVATE_EXTENSION_EXCEPTION', default=extension_exception)
+
+ builtin_default = Options.options['BUILTIN_LIBRARIES_DEFAULT']
+ gr.add_option('--builtin-libraries',
+                  help=("comma separated list of libraries to build directly into binaries [%s]" % builtin_default),
+ action="store", dest='BUILTIN_LIBRARIES', default=builtin_default)
+
+ gr.add_option('--minimum-library-version',
+ help=("list of minimum system library versions (LIBNAME1:version,LIBNAME2:version)"),
+ action="store", dest='MINIMUM_LIBRARY_VERSION', default='')
+
+ gr.add_option('--disable-rpath',
+ help=("Disable use of rpath for build binaries"),
+ action="store_true", dest='disable_rpath_build', default=False)
+ gr.add_option('--disable-rpath-install',
+ help=("Disable use of rpath for library path in installed files"),
+ action="store_true", dest='disable_rpath_install', default=False)
+ gr.add_option('--disable-rpath-private-install',
+ help=("Disable use of rpath for private library path in installed files"),
+ action="store_true", dest='disable_rpath_private_install', default=False)
+ gr.add_option('--nonshared-binary',
+ help=("Disable use of shared libs for the listed binaries"),
+ action="store", dest='NONSHARED_BINARIES', default='')
+ gr.add_option('--disable-symbol-versions',
+ help=("Disable use of the --version-script linker option"),
+ action="store_true", dest='disable_symbol_versions', default=False)
+
+ opt.add_option('--with-modulesdir',
+ help=("modules directory [PREFIX/modules]"),
+ action="store", dest='MODULESDIR', default='${PREFIX}/modules')
+
+ opt.add_option('--with-privatelibdir',
+ help=("private library directory [PREFIX/lib/%s]" % Utils.g_module.APPNAME),
+ action="store", dest='PRIVATELIBDIR', default=None)
+
+ opt.add_option('--with-libiconv',
+ help='additional directory to search for libiconv',
+ action='store', dest='iconv_open', default='/usr/local',
+ match = ['Checking for library iconv', 'Checking for iconv_open', 'Checking for header iconv.h'])
+ opt.add_option('--with-gettext',
+ help='additional directory to search for gettext',
+ action='store', dest='gettext_location', default='None')
+ opt.add_option('--without-gettext',
+ help=("Disable use of gettext"),
+ action="store_true", dest='disable_gettext', default=False)
+
+ gr = opt.option_group('developer options')
+
+ gr.add_option('-C',
+                  help='enable configure caching',
+ action='store_true', dest='enable_configure_cache')
+ gr.add_option('--enable-auto-reconfigure',
+ help='enable automatic reconfigure on build',
+ action='store_true', dest='enable_auto_reconfigure')
+ gr.add_option('--enable-debug',
+ help=("Turn on debugging symbols"),
+ action="store_true", dest='debug', default=False)
+ gr.add_option('--enable-developer',
+ help=("Turn on developer warnings and debugging"),
+ action="store_true", dest='developer', default=False)
+ gr.add_option('--picky-developer',
+ help=("Treat all warnings as errors (enable -Werror)"),
+ action="store_true", dest='picky_developer', default=False)
+ gr.add_option('--fatal-errors',
+ help=("Stop compilation on first error (enable -Wfatal-errors)"),
+ action="store_true", dest='fatal_errors', default=False)
+ gr.add_option('--enable-gccdeps',
+ help=("Enable use of gcc -MD dependency module"),
+ action="store_true", dest='enable_gccdeps', default=True)
+ gr.add_option('--timestamp-dependencies',
+ help=("use file timestamps instead of content for build dependencies (BROKEN)"),
+ action="store_true", dest='timestamp_dependencies', default=False)
+ gr.add_option('--pedantic',
+ help=("Enable even more compiler warnings"),
+ action='store_true', dest='pedantic', default=False)
+ gr.add_option('--git-local-changes',
+ help=("mark version with + if local git changes"),
+ action='store_true', dest='GIT_LOCAL_CHANGES', default=False)
+ gr.add_option('--address-sanitizer',
+ help=("Enable address sanitizer compile and linker flags"),
+ action="store_true", dest='address_sanitizer', default=False)
+
+ gr.add_option('--abi-check',
+ help=("Check ABI signatures for libraries"),
+ action='store_true', dest='ABI_CHECK', default=False)
+ gr.add_option('--abi-check-disable',
+ help=("Disable ABI checking (used with --enable-developer)"),
+ action='store_true', dest='ABI_CHECK_DISABLE', default=False)
+ gr.add_option('--abi-update',
+ help=("Update ABI signature files for libraries"),
+ action='store_true', dest='ABI_UPDATE', default=False)
+
+ gr.add_option('--show-deps',
+ help=("Show dependency tree for the given target"),
+ dest='SHOWDEPS', default='')
+
+ gr.add_option('--symbol-check',
+ help=("check symbols in object files against project rules"),
+ action='store_true', dest='SYMBOLCHECK', default=False)
+
+ gr.add_option('--dup-symbol-check',
+ help=("check for duplicate symbols in object files and system libs (must be configured with --enable-developer)"),
+ action='store_true', dest='DUP_SYMBOLCHECK', default=False)
+
+ gr.add_option('--why-needed',
+ help=("TARGET:DEPENDENCY check why TARGET needs DEPENDENCY"),
+ action='store', type='str', dest='WHYNEEDED', default=None)
+
+ gr.add_option('--show-duplicates',
+ help=("Show objects which are included in multiple binaries or libraries"),
+ action='store_true', dest='SHOW_DUPLICATES', default=False)
+
+ gr = opt.add_option_group('cross compilation options')
+
+ gr.add_option('--cross-compile',
+ help=("configure for cross-compilation"),
+ action='store_true', dest='CROSS_COMPILE', default=False)
+ gr.add_option('--cross-execute',
+ help=("command prefix to use for cross-execution in configure"),
+ action='store', dest='CROSS_EXECUTE', default='')
+ gr.add_option('--cross-answers',
+ help=("answers to cross-compilation configuration (auto modified)"),
+ action='store', dest='CROSS_ANSWERS', default='')
+ gr.add_option('--hostcc',
+ help=("set host compiler when cross compiling"),
+ action='store', dest='HOSTCC', default=False)
+
+ # we use SUPPRESS_HELP for these, as they are ignored, and are there only
+ # to allow existing RPM spec files to work
+ opt.add_option('--build',
+ help=SUPPRESS_HELP,
+ action='store', dest='AUTOCONF_BUILD', default='')
+ opt.add_option('--host',
+ help=SUPPRESS_HELP,
+ action='store', dest='AUTOCONF_HOST', default='')
+ opt.add_option('--target',
+ help=SUPPRESS_HELP,
+ action='store', dest='AUTOCONF_TARGET', default='')
+ opt.add_option('--program-prefix',
+ help=SUPPRESS_HELP,
+ action='store', dest='AUTOCONF_PROGRAM_PREFIX', default='')
+ opt.add_option('--disable-dependency-tracking',
+ help=SUPPRESS_HELP,
+ action='store_true', dest='AUTOCONF_DISABLE_DEPENDENCY_TRACKING', default=False)
+ opt.add_option('--disable-silent-rules',
+ help=SUPPRESS_HELP,
+ action='store_true', dest='AUTOCONF_DISABLE_SILENT_RULES', default=False)
+
+ gr = opt.option_group('dist options')
+ gr.add_option('--sign-release',
+ help='sign the release tarball created by waf dist',
+ action='store_true', dest='SIGN_RELEASE')
+ gr.add_option('--tag',
+ help='tag release in git at the same time',
+ type='string', action='store', dest='TAG_RELEASE')
+
+
+@wafsamba.runonce
+def configure(conf):
+ conf.env.hlist = []
+ conf.env.srcdir = conf.srcdir
+
+ if Options.options.timestamp_dependencies:
+ conf.ENABLE_TIMESTAMP_DEPENDENCIES()
+
+ conf.SETUP_CONFIGURE_CACHE(Options.options.enable_configure_cache)
+
+ # load our local waf extensions
+ conf.check_tool('gnu_dirs')
+ conf.check_tool('wafsamba')
+
+ conf.CHECK_CC_ENV()
+
+ conf.check_tool('compiler_cc')
+
+ conf.CHECK_STANDARD_LIBPATH()
+
+ # we need git for 'waf dist'
+ conf.find_program('git', var='GIT')
+
+    # older gcc versions (< 4.4) do not work with gccdeps, so we have to see if the .d file is generated
+ if Options.options.enable_gccdeps:
+ from TaskGen import feature, after
+ @feature('testd')
+ @after('apply_core')
+ def check_d(self):
+ tsk = self.compiled_tasks[0]
+ tsk.outputs.append(tsk.outputs[0].change_ext('.d'))
+
+ import Task
+ cc = Task.TaskBase.classes['cc']
+ oldmeth = cc.run
+
+ cc.run = Task.compile_fun_noshell('cc', '${CC} ${CCFLAGS} ${CPPFLAGS} ${_CCINCFLAGS} ${_CCDEFFLAGS} ${CC_SRC_F}${SRC} ${CC_TGT_F}${TGT[0].abspath(env)}')[0]
+ try:
+ try:
+ conf.check(features='cc testd', fragment='int main() {return 0;}\n', ccflags=['-MD'], mandatory=True, msg='Check for -MD')
+ except:
+ pass
+ else:
+ conf.check_tool('gccdeps', tooldir=conf.srcdir + "/buildtools/wafsamba")
+ finally:
+ cc.run = oldmeth
+
+ # make the install paths available in environment
+ conf.env.LIBDIR = Options.options.LIBDIR or '${PREFIX}/lib'
+ conf.env.BINDIR = Options.options.BINDIR or '${PREFIX}/bin'
+ conf.env.SBINDIR = Options.options.SBINDIR or '${PREFIX}/sbin'
+ conf.env.MODULESDIR = Options.options.MODULESDIR
+ conf.env.PRIVATELIBDIR = Options.options.PRIVATELIBDIR
+ conf.env.BUNDLED_LIBS = Options.options.BUNDLED_LIBS.split(',')
+ conf.env.PRIVATE_LIBS = Options.options.PRIVATE_LIBS.split(',')
+ conf.env.BUILTIN_LIBRARIES = Options.options.BUILTIN_LIBRARIES.split(',')
+ conf.env.NONSHARED_BINARIES = Options.options.NONSHARED_BINARIES.split(',')
+
+ conf.env.PRIVATE_EXTENSION = Options.options.PRIVATE_EXTENSION
+ conf.env.PRIVATE_EXTENSION_EXCEPTION = Options.options.PRIVATE_EXTENSION_EXCEPTION.split(',')
+
+ conf.env.CROSS_COMPILE = Options.options.CROSS_COMPILE
+ conf.env.CROSS_EXECUTE = Options.options.CROSS_EXECUTE
+ conf.env.CROSS_ANSWERS = Options.options.CROSS_ANSWERS
+ conf.env.HOSTCC = Options.options.HOSTCC
+
+ conf.env.AUTOCONF_BUILD = Options.options.AUTOCONF_BUILD
+ conf.env.AUTOCONF_HOST = Options.options.AUTOCONF_HOST
+ conf.env.AUTOCONF_PROGRAM_PREFIX = Options.options.AUTOCONF_PROGRAM_PREFIX
+
+ if (conf.env.AUTOCONF_HOST and
+ conf.env.AUTOCONF_BUILD and
+ conf.env.AUTOCONF_BUILD != conf.env.AUTOCONF_HOST):
+ Logs.error('ERROR: Mismatch between --build and --host. Please use --cross-compile instead')
+ sys.exit(1)
+ if conf.env.AUTOCONF_PROGRAM_PREFIX:
+ Logs.error('ERROR: --program-prefix not supported')
+ sys.exit(1)
+
+ # enable ABI checking for developers
+ conf.env.ABI_CHECK = Options.options.ABI_CHECK or Options.options.developer
+ if Options.options.ABI_CHECK_DISABLE:
+ conf.env.ABI_CHECK = False
+ try:
+ conf.find_program('gdb', mandatory=True)
+ except:
+ conf.env.ABI_CHECK = False
+
+ conf.env.GIT_LOCAL_CHANGES = Options.options.GIT_LOCAL_CHANGES
+
+ conf.CHECK_COMMAND(['uname', '-a'],
+ msg='Checking build system',
+ define='BUILD_SYSTEM',
+ on_target=False)
+ conf.CHECK_UNAME()
+
+ # see if we can compile and run a simple C program
+ conf.CHECK_CODE('printf("hello world")',
+ define='HAVE_SIMPLE_C_PROG',
+ mandatory=True,
+ execute=True,
+ headers='stdio.h',
+ msg='Checking simple C program')
+
+ # Try to find the right extra flags for -Werror behaviour
+ for f in ["-Werror", # GCC
+ "-errwarn=%all", # Sun Studio
+ "-qhalt=w", # IBM xlc
+ "-w2", # Tru64
+ ]:
+ if conf.CHECK_CFLAGS([f], '''
+'''):
+ if not 'WERROR_CFLAGS' in conf.env:
+ conf.env['WERROR_CFLAGS'] = []
+ conf.env['WERROR_CFLAGS'].extend([f])
+ break
+
+ # check which compiler/linker flags are needed for rpath support
+ if not conf.CHECK_LDFLAGS(['-Wl,-rpath,.']) and conf.CHECK_LDFLAGS(['-Wl,-R,.']):
+ conf.env['RPATH_ST'] = '-Wl,-R,%s'
+
+ # check for rpath
+ if conf.CHECK_LIBRARY_SUPPORT(rpath=True):
+ support_rpath = True
+ conf.env.RPATH_ON_BUILD = not Options.options.disable_rpath_build
+ conf.env.RPATH_ON_INSTALL = (conf.env.RPATH_ON_BUILD and
+ not Options.options.disable_rpath_install)
+ if not conf.env.PRIVATELIBDIR:
+ conf.env.PRIVATELIBDIR = '%s/%s' % (conf.env.LIBDIR, Utils.g_module.APPNAME)
+ conf.env.RPATH_ON_INSTALL_PRIVATE = (
+ not Options.options.disable_rpath_private_install)
+ else:
+ support_rpath = False
+ conf.env.RPATH_ON_INSTALL = False
+ conf.env.RPATH_ON_BUILD = False
+ conf.env.RPATH_ON_INSTALL_PRIVATE = False
+ if not conf.env.PRIVATELIBDIR:
+ # rpath is not possible so there is no sense in having a
+ # private library directory by default.
+ # the user can of course always override it.
+ conf.env.PRIVATELIBDIR = conf.env.LIBDIR
+
+ if (not Options.options.disable_symbol_versions and
+ conf.CHECK_LIBRARY_SUPPORT(rpath=support_rpath,
+ version_script=True,
+ msg='-Wl,--version-script support')):
+ conf.env.HAVE_LD_VERSION_SCRIPT = True
+ else:
+ conf.env.HAVE_LD_VERSION_SCRIPT = False
+
+ if conf.CHECK_CFLAGS(['-fvisibility=hidden'] + conf.env.WERROR_CFLAGS):
+ conf.env.VISIBILITY_CFLAGS = '-fvisibility=hidden'
+ conf.CHECK_CODE('''int main(void) { return 0; }
+ __attribute__((visibility("default"))) void vis_foo2(void) {}''',
+ cflags=conf.env.VISIBILITY_CFLAGS,
+ define='HAVE_VISIBILITY_ATTR', addmain=False)
+
+ if sys.platform.startswith('aix'):
+ conf.DEFINE('_ALL_SOURCE', 1, add_to_cflags=True)
+ # Might not be needed if ALL_SOURCE is defined
+ # conf.DEFINE('_XOPEN_SOURCE', 600, add_to_cflags=True)
+
+ # we should use the PIC options in waf instead
+    # Some compilers don't support -fPIC and just print a warning
+ if conf.env['COMPILER_CC'] == "suncc":
+ conf.ADD_CFLAGS('-KPIC', testflags=True)
+        # we really want a define here, as we need this define even
+        # during the tests; otherwise detection of boolean is broken
+ conf.DEFINE('_STDC_C99', 1, add_to_cflags=True)
+ conf.DEFINE('_XPG6', 1, add_to_cflags=True)
+ else:
+ conf.ADD_CFLAGS('-fPIC', testflags=True)
+
+    # On Solaris 8 with suncc (at least), the linker flags that set the
+    # library name do not always work (when the command line is very long
+    # and contains many files)
+
+ if conf.env['COMPILER_CC'] == "suncc":
+ save = conf.env['SONAME_ST']
+ conf.env['SONAME_ST'] = '-Wl,-h,%s'
+ if not conf.CHECK_SHLIB_INTRASINC_NAME_FLAGS("Checking if flags %s are ok" % conf.env['SONAME_ST']):
+ conf.env['SONAME_ST'] = save
+
+ conf.CHECK_INLINE()
+
+ # check for pkgconfig
+ conf.CHECK_CFG(atleast_pkgconfig_version='0.0.0')
+
+ conf.DEFINE('_GNU_SOURCE', 1, add_to_cflags=True)
+ conf.DEFINE('_XOPEN_SOURCE_EXTENDED', 1, add_to_cflags=True)
+
+ # on Tru64 certain features are only available with _OSF_SOURCE set to 1
+ # and _XOPEN_SOURCE set to 600
+ if conf.env['SYSTEM_UNAME_SYSNAME'] == 'OSF1':
+ conf.DEFINE('_OSF_SOURCE', 1, add_to_cflags=True)
+ conf.DEFINE('_XOPEN_SOURCE', 600, add_to_cflags=True)
+
+    # SCM_RIGHTS is only available if _XOPEN_SOURCE is defined on IRIX
+ if conf.env['SYSTEM_UNAME_SYSNAME'] == 'IRIX':
+ conf.DEFINE('_XOPEN_SOURCE', 600, add_to_cflags=True)
+ conf.DEFINE('_BSD_TYPES', 1, add_to_cflags=True)
+
+ # Try to find the right extra flags for C99 initialisers
+ for f in ["", "-AC99", "-qlanglvl=extc99", "-qlanglvl=stdc99", "-c99"]:
+ if conf.CHECK_CFLAGS([f], '''
+struct foo {int x;char y;};
+struct foo bar = { .y = 'X', .x = 1 };
+'''):
+ if f != "":
+ conf.ADD_CFLAGS(f)
+ break
+
+ # get the base headers we'll use for the rest of the tests
+ conf.CHECK_HEADERS('stdio.h sys/types.h sys/stat.h stdlib.h stddef.h memory.h string.h',
+ add_headers=True)
+ conf.CHECK_HEADERS('strings.h inttypes.h stdint.h unistd.h minix/config.h', add_headers=True)
+ conf.CHECK_HEADERS('ctype.h', add_headers=True)
+
+ if sys.platform != 'darwin':
+ conf.CHECK_HEADERS('standards.h', add_headers=True)
+
+ conf.CHECK_HEADERS('stdbool.h stdint.h stdarg.h vararg.h', add_headers=True)
+ conf.CHECK_HEADERS('limits.h assert.h')
+
+ # see if we need special largefile flags
+ if not conf.CHECK_LARGEFILE():
+        raise Utils.WafError('Samba requires large file support, but it is not available on this platform: sizeof(off_t) < 8')
+
+ if 'HAVE_STDDEF_H' in conf.env and 'HAVE_STDLIB_H' in conf.env:
+ conf.DEFINE('STDC_HEADERS', 1)
+
+ conf.CHECK_HEADERS('sys/time.h time.h', together=True)
+
+ if 'HAVE_SYS_TIME_H' in conf.env and 'HAVE_TIME_H' in conf.env:
+ conf.DEFINE('TIME_WITH_SYS_TIME', 1)
+
+ # cope with different extensions for libraries
+ (root, ext) = os.path.splitext(conf.env.shlib_PATTERN)
+ if ext[0] == '.':
+ conf.define('SHLIBEXT', ext[1:], quote=True)
+ else:
+ conf.define('SHLIBEXT', "so", quote=True)
+
+    # First try a header check for cross-compile friendliness
+ conf.CHECK_CODE(code = """#ifdef __BYTE_ORDER
+ #define B __BYTE_ORDER
+ #elif defined(BYTE_ORDER)
+ #define B BYTE_ORDER
+ #endif
+
+ #ifdef __LITTLE_ENDIAN
+ #define LITTLE __LITTLE_ENDIAN
+ #elif defined(LITTLE_ENDIAN)
+ #define LITTLE LITTLE_ENDIAN
+ #endif
+
+ #if !defined(LITTLE) || !defined(B) || LITTLE != B
+ #error Not little endian.
+ #endif
+ int main(void) { return 0; }""",
+ addmain=False,
+ headers="endian.h sys/endian.h",
+ define="HAVE_LITTLE_ENDIAN")
+ conf.CHECK_CODE(code = """#ifdef __BYTE_ORDER
+ #define B __BYTE_ORDER
+ #elif defined(BYTE_ORDER)
+ #define B BYTE_ORDER
+ #endif
+
+ #ifdef __BIG_ENDIAN
+ #define BIG __BIG_ENDIAN
+ #elif defined(BIG_ENDIAN)
+ #define BIG BIG_ENDIAN
+ #endif
+
+ #if !defined(BIG) || !defined(B) || BIG != B
+ #error Not big endian.
+ #endif
+ int main(void) { return 0; }""",
+ addmain=False,
+ headers="endian.h sys/endian.h",
+ define="HAVE_BIG_ENDIAN")
+
+ if not conf.CONFIG_SET("HAVE_BIG_ENDIAN") and not conf.CONFIG_SET("HAVE_LITTLE_ENDIAN"):
+ # That didn't work! Do runtime test.
+ conf.CHECK_CODE("""union { int i; char c[sizeof(int)]; } u;
+ u.i = 0x01020304;
+ return u.c[0] == 0x04 && u.c[1] == 0x03 && u.c[2] == 0x02 && u.c[3] == 0x01 ? 0 : 1;""",
+ addmain=True, execute=True,
+ define='HAVE_LITTLE_ENDIAN',
+ msg="Checking for HAVE_LITTLE_ENDIAN - runtime")
+ conf.CHECK_CODE("""union { int i; char c[sizeof(int)]; } u;
+ u.i = 0x01020304;
+ return u.c[0] == 0x01 && u.c[1] == 0x02 && u.c[2] == 0x03 && u.c[3] == 0x04 ? 0 : 1;""",
+ addmain=True, execute=True,
+ define='HAVE_BIG_ENDIAN',
+ msg="Checking for HAVE_BIG_ENDIAN - runtime")
+
+ # Extra sanity check.
+ if conf.CONFIG_SET("HAVE_BIG_ENDIAN") == conf.CONFIG_SET("HAVE_LITTLE_ENDIAN"):
+ Logs.error("Failed endian determination. The PDP-11 is back?")
+ sys.exit(1)
+ else:
+ if conf.CONFIG_SET("HAVE_BIG_ENDIAN"):
+ conf.DEFINE('WORDS_BIGENDIAN', 1)
+
+    # check whether signal handlers return int or void (RETSIGTYPE)
+ if conf.CHECK_CODE('return *(signal (0, 0)) (0) == 1',
+ define='RETSIGTYPE_INT',
+ execute=False,
+ headers='signal.h',
+ msg='Checking if signal handlers return int'):
+ conf.DEFINE('RETSIGTYPE', 'int')
+ else:
+ conf.DEFINE('RETSIGTYPE', 'void')
+
+ conf.CHECK_VARIABLE('__FUNCTION__', define='HAVE_FUNCTION_MACRO')
+
+ conf.CHECK_CODE('va_list ap1,ap2; va_copy(ap1,ap2)',
+ define="HAVE_VA_COPY",
+ msg="Checking for va_copy")
+
+ conf.CHECK_CODE('''
+ #define eprintf(...) fprintf(stderr, __VA_ARGS__)
+ eprintf("bla", "bar")
+ ''', define='HAVE__VA_ARGS__MACRO')
+
+ conf.SAMBA_BUILD_ENV()
+
+
+def build(bld):
+ # give a more useful message if the source directory has moved
+ relpath = os_path_relpath(bld.curdir, bld.srcnode.abspath())
+ if relpath.find('../') != -1:
+ Logs.error('bld.curdir %s is not a child of %s' % (bld.curdir, bld.srcnode.abspath()))
+ raise Utils.WafError('''The top source directory has moved. Please run distclean and reconfigure''')
+
+ bld.CHECK_MAKEFLAGS()
+ bld.SETUP_BUILD_GROUPS()
+ bld.ENFORCE_GROUP_ORDERING()
+ bld.CHECK_PROJECT_RULES()
diff --git a/common/check.c b/common/check.c
new file mode 100644
index 0000000..e632af5
--- /dev/null
+++ b/common/check.c
@@ -0,0 +1,474 @@
+ /*
+ Unix SMB/CIFS implementation.
+
+ trivial database library
+
+ Copyright (C) Rusty Russell 2009
+
+ ** NOTE! The following LGPL license applies to the tdb
+ ** library. This does NOT imply that all of Samba is released
+ ** under the LGPL
+
+ This library is free software; you can redistribute it and/or
+ modify it under the terms of the GNU Lesser General Public
+ License as published by the Free Software Foundation; either
+ version 3 of the License, or (at your option) any later version.
+
+ This library is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ Lesser General Public License for more details.
+
+ You should have received a copy of the GNU Lesser General Public
+ License along with this library; if not, see <http://www.gnu.org/licenses/>.
+*/
+#include "tdb_private.h"
+
+/* Since we opened it, these shouldn't fail unless it's recent corruption. */
+static bool tdb_check_header(struct tdb_context *tdb, tdb_off_t *recovery)
+{
+ struct tdb_header hdr;
+ uint32_t h1, h2;
+
+ if (tdb->methods->tdb_read(tdb, 0, &hdr, sizeof(hdr), 0) == -1)
+ return false;
+ if (strcmp(hdr.magic_food, TDB_MAGIC_FOOD) != 0)
+ goto corrupt;
+
+ CONVERT(hdr);
+ if (hdr.version != TDB_VERSION)
+ goto corrupt;
+
+ if (hdr.rwlocks != 0 &&
+ hdr.rwlocks != TDB_FEATURE_FLAG_MAGIC &&
+ hdr.rwlocks != TDB_HASH_RWLOCK_MAGIC)
+ goto corrupt;
+
+ tdb_header_hash(tdb, &h1, &h2);
+ if (hdr.magic1_hash && hdr.magic2_hash &&
+ (hdr.magic1_hash != h1 || hdr.magic2_hash != h2))
+ goto corrupt;
+
+ if (hdr.hash_size == 0)
+ goto corrupt;
+
+ if (hdr.hash_size != tdb->hash_size)
+ goto corrupt;
+
+ if (hdr.recovery_start != 0 &&
+ hdr.recovery_start < TDB_DATA_START(tdb->hash_size))
+ goto corrupt;
+
+ *recovery = hdr.recovery_start;
+ return true;
+
+corrupt:
+ tdb->ecode = TDB_ERR_CORRUPT;
+ TDB_LOG((tdb, TDB_DEBUG_ERROR, "Header is corrupt\n"));
+ return false;
+}
+
+/* Generic record header check. */
+static bool tdb_check_record(struct tdb_context *tdb,
+ tdb_off_t off,
+ const struct tdb_record *rec)
+{
+ tdb_off_t tailer;
+
+ /* Check rec->next: 0 or points to record offset, aligned. */
+ if (rec->next > 0 && rec->next < TDB_DATA_START(tdb->hash_size)){
+ TDB_LOG((tdb, TDB_DEBUG_ERROR,
+ "Record offset %u too small next %u\n",
+ off, rec->next));
+ goto corrupt;
+ }
+ if (rec->next + sizeof(*rec) < rec->next) {
+ TDB_LOG((tdb, TDB_DEBUG_ERROR,
+ "Record offset %u too large next %u\n",
+ off, rec->next));
+ goto corrupt;
+ }
+ if ((rec->next % TDB_ALIGNMENT) != 0) {
+ TDB_LOG((tdb, TDB_DEBUG_ERROR,
+ "Record offset %u misaligned next %u\n",
+ off, rec->next));
+ goto corrupt;
+ }
+ if (tdb->methods->tdb_oob(tdb, rec->next, sizeof(*rec), 0))
+ goto corrupt;
+
+ /* Check rec_len: similar to rec->next, implies next record. */
+ if ((rec->rec_len % TDB_ALIGNMENT) != 0) {
+ TDB_LOG((tdb, TDB_DEBUG_ERROR,
+ "Record offset %u misaligned length %u\n",
+ off, rec->rec_len));
+ goto corrupt;
+ }
+ /* Must fit tailer. */
+ if (rec->rec_len < sizeof(tailer)) {
+ TDB_LOG((tdb, TDB_DEBUG_ERROR,
+ "Record offset %u too short length %u\n",
+ off, rec->rec_len));
+ goto corrupt;
+ }
+ /* OOB allows "right at the end" access, so this works for last rec. */
+ if (tdb->methods->tdb_oob(tdb, off, sizeof(*rec)+rec->rec_len, 0))
+ goto corrupt;
+
+ /* Check tailer. */
+ if (tdb_ofs_read(tdb, off+sizeof(*rec)+rec->rec_len-sizeof(tailer),
+ &tailer) == -1)
+ goto corrupt;
+ if (tailer != sizeof(*rec) + rec->rec_len) {
+ TDB_LOG((tdb, TDB_DEBUG_ERROR,
+ "Record offset %u invalid tailer\n", off));
+ goto corrupt;
+ }
+
+ return true;
+
+corrupt:
+ tdb->ecode = TDB_ERR_CORRUPT;
+ return false;
+}
+
+/* Grab some bytes: may copy if can't use mmap.
+ Caller has already done bounds check. */
+static TDB_DATA get_bytes(struct tdb_context *tdb,
+ tdb_off_t off, tdb_len_t len)
+{
+ TDB_DATA d;
+
+ d.dsize = len;
+
+ if (tdb->transaction == NULL && tdb->map_ptr != NULL)
+ d.dptr = (unsigned char *)tdb->map_ptr + off;
+ else
+ d.dptr = tdb_alloc_read(tdb, off, d.dsize);
+ return d;
+}
+
+/* Frees data if we're not able to simply use mmap. */
+static void put_bytes(struct tdb_context *tdb, TDB_DATA d)
+{
+ if (tdb->transaction == NULL && tdb->map_ptr != NULL)
+ return;
+ free(d.dptr);
+}
+
+/* We use the excellent Jenkins lookup3 hash; this is based on hash_word2.
+ * See: http://burtleburtle.net/bob/c/lookup3.c
+ */
+#define rot(x,k) (((x)<<(k)) | ((x)>>(32-(k))))
+static void hash(uint32_t key, uint32_t *pc, uint32_t *pb)
+{
+ uint32_t a,b,c;
+
+ /* Set up the internal state */
+ a = b = c = 0xdeadbeef + *pc;
+ c += *pb;
+ a += key;
+ c ^= b; c -= rot(b,14);
+ a ^= c; a -= rot(c,11);
+ b ^= a; b -= rot(a,25);
+ c ^= b; c -= rot(b,16);
+ a ^= c; a -= rot(c,4);
+ b ^= a; b -= rot(a,14);
+ c ^= b; c -= rot(b,24);
+ *pc=c; *pb=b;
+}
+
+/*
+ We want to check that all free records are in the free list
+ (only once), and all free list entries are free records. Similarly
+ for each hash chain of used records.
+
+ Doing that naively (without walking hash chains, since we want to be
+ linear) means keeping a list of records which have been seen in each
+ hash chain, and another of records pointed to (ie. next pointers
+ from records and the initial hash chain heads). These two lists
+ should be equal. This will take 8 bytes per record, and require
+ sorting at the end.
+
+   So instead, we record each offset in a bitmap in such a way that
+ recording it twice will cancel out. Since each offset should appear
+ exactly twice, the bitmap should be zero at the end.
+
+ The approach was inspired by Bloom Filters (see Wikipedia). For
+ each value, we flip K bits in a bitmap of size N. The number of
+ distinct arrangements is:
+
+ N! / (K! * (N-K)!)
+
+ Of course, not all arrangements are actually distinct, but testing
+ shows this formula to be close enough.
+
+ So, if K == 8 and N == 256, the probability of two things flipping the same
+ bits is 1 in 409,663,695,276,000.
+
+ Given that ldb uses a hash size of 10000, using 32 bytes per hash chain
+ (320k) seems reasonable.
+*/
+#define NUM_HASHES 8
+#define BITMAP_BITS 256
+
+static void bit_flip(unsigned char bits[], unsigned int idx)
+{
+ bits[idx / CHAR_BIT] ^= (1 << (idx % CHAR_BIT));
+}
+
+/* We record offsets in a bitmap for the particular chain it should be in. */
+static void record_offset(unsigned char bits[], tdb_off_t off)
+{
+ uint32_t h1 = off, h2 = 0;
+ unsigned int i;
+
+ /* We get two good hash values out of jhash2, so we use both. Then
+ * we keep going to produce further hash values. */
+ for (i = 0; i < NUM_HASHES / 2; i++) {
+ hash(off, &h1, &h2);
+ bit_flip(bits, h1 % BITMAP_BITS);
+ bit_flip(bits, h2 % BITMAP_BITS);
+ h2++;
+ }
+}
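The cancellation property that the big comment above relies on is easy to
sketch outside C. The standalone Python illustration below (not tdb code;
the hash choice is arbitrary) flips NUM_HASHES bit positions per value, and
recording every value an even number of times leaves the bitmap all-zero:

    # illustration only (not tdb code): even-count recording cancels out
    import hashlib

    BITMAP_BITS = 256
    NUM_HASHES = 8

    def record(bits, off):
        digest = hashlib.md5(str(off).encode()).digest()
        for i in range(NUM_HASHES):
            idx = digest[i] % BITMAP_BITS     # pick a bit position
            bits[idx // 8] ^= 1 << (idx % 8)  # second flip undoes the first

    bits = bytearray(BITMAP_BITS // 8)
    for off in (100, 200, 300):
        record(bits, off)   # offset seen as a record...
        record(bits, off)   # ...and seen once as a reference to it
    assert not any(bits)    # every offset appeared exactly twice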
+
+/* Check that an in-use record is valid. */
+static bool tdb_check_used_record(struct tdb_context *tdb,
+ tdb_off_t off,
+ const struct tdb_record *rec,
+ unsigned char **hashes,
+ int (*check)(TDB_DATA, TDB_DATA, void *),
+ void *private_data)
+{
+ TDB_DATA key, data;
+
+ if (!tdb_check_record(tdb, off, rec))
+ return false;
+
+ /* key + data + tailer must fit in record */
+ if (rec->key_len + rec->data_len + sizeof(tdb_off_t) > rec->rec_len) {
+ TDB_LOG((tdb, TDB_DEBUG_ERROR,
+ "Record offset %u too short for contents\n", off));
+ return false;
+ }
+
+ key = get_bytes(tdb, off + sizeof(*rec), rec->key_len);
+ if (!key.dptr)
+ return false;
+
+ if (tdb->hash_fn(&key) != rec->full_hash) {
+ TDB_LOG((tdb, TDB_DEBUG_ERROR,
+ "Record offset %u has incorrect hash\n", off));
+ goto fail_put_key;
+ }
+
+ /* Mark this offset as a known value for this hash bucket. */
+ record_offset(hashes[BUCKET(rec->full_hash)+1], off);
+ /* And similarly if the next pointer is valid. */
+ if (rec->next)
+ record_offset(hashes[BUCKET(rec->full_hash)+1], rec->next);
+
+ /* If they supply a check function and this record isn't dead,
+ get data and feed it. */
+ if (check && rec->magic != TDB_DEAD_MAGIC) {
+ data = get_bytes(tdb, off + sizeof(*rec) + rec->key_len,
+ rec->data_len);
+ if (!data.dptr)
+ goto fail_put_key;
+
+ if (check(key, data, private_data) == -1)
+ goto fail_put_data;
+ put_bytes(tdb, data);
+ }
+
+ put_bytes(tdb, key);
+ return true;
+
+fail_put_data:
+ put_bytes(tdb, data);
+fail_put_key:
+ put_bytes(tdb, key);
+ return false;
+}
+
+/* Check that an unused record is valid. */
+static bool tdb_check_free_record(struct tdb_context *tdb,
+ tdb_off_t off,
+ const struct tdb_record *rec,
+ unsigned char **hashes)
+{
+ if (!tdb_check_record(tdb, off, rec))
+ return false;
+
+ /* Mark this offset as a known value for the free list. */
+ record_offset(hashes[0], off);
+ /* And similarly if the next pointer is valid. */
+ if (rec->next)
+ record_offset(hashes[0], rec->next);
+ return true;
+}
+
+/* Slow, but should be very rare. */
+size_t tdb_dead_space(struct tdb_context *tdb, tdb_off_t off)
+{
+ size_t len;
+
+ for (len = 0; off + len < tdb->map_size; len++) {
+ char c;
+ if (tdb->methods->tdb_read(tdb, off, &c, 1, 0))
+ return 0;
+ if (c != 0 && c != 0x42)
+ break;
+ }
+ return len;
+}
+
+_PUBLIC_ int tdb_check(struct tdb_context *tdb,
+ int (*check)(TDB_DATA key, TDB_DATA data, void *private_data),
+ void *private_data)
+{
+ unsigned int h;
+ unsigned char **hashes;
+ tdb_off_t off, recovery_start;
+ struct tdb_record rec;
+ bool found_recovery = false;
+ tdb_len_t dead;
+ bool locked;
+
+ /* Read-only databases use no locking at all: it's best-effort.
+ * We may have a write lock already, so skip that case too. */
+ if (tdb->read_only || tdb->allrecord_lock.count != 0) {
+ locked = false;
+ } else {
+ if (tdb_lockall_read(tdb) == -1)
+ return -1;
+ locked = true;
+ }
+
+ /* Make sure we know true size of the underlying file. */
+ tdb->methods->tdb_oob(tdb, tdb->map_size, 1, 1);
+
+ /* Header must be OK: also gets us the recovery ptr, if any. */
+ if (!tdb_check_header(tdb, &recovery_start))
+ goto unlock;
+
+ /* We should have the whole header, too. */
+ if (tdb->map_size < TDB_DATA_START(tdb->hash_size)) {
+ tdb->ecode = TDB_ERR_CORRUPT;
+ TDB_LOG((tdb, TDB_DEBUG_ERROR, "File too short for hashes\n"));
+ goto unlock;
+ }
+
+ /* One big malloc: pointers then bit arrays. */
+ hashes = (unsigned char **)calloc(
+ 1, sizeof(hashes[0]) * (1+tdb->hash_size)
+ + BITMAP_BITS / CHAR_BIT * (1+tdb->hash_size));
+ if (!hashes) {
+ tdb->ecode = TDB_ERR_OOM;
+ goto unlock;
+ }
+
+ /* Initialize pointers */
+ hashes[0] = (unsigned char *)(&hashes[1+tdb->hash_size]);
+ for (h = 1; h < 1+tdb->hash_size; h++)
+ hashes[h] = hashes[h-1] + BITMAP_BITS / CHAR_BIT;
+
+ /* Freelist and hash headers are all in a row: read them. */
+ for (h = 0; h < 1+tdb->hash_size; h++) {
+ if (tdb_ofs_read(tdb, FREELIST_TOP + h*sizeof(tdb_off_t),
+ &off) == -1)
+ goto free;
+ if (off)
+ record_offset(hashes[h], off);
+ }
+
+ /* For each record, read it in and check it's ok. */
+ for (off = TDB_DATA_START(tdb->hash_size);
+ off < tdb->map_size;
+ off += sizeof(rec) + rec.rec_len) {
+ if (tdb->methods->tdb_read(tdb, off, &rec, sizeof(rec),
+ DOCONV()) == -1)
+ goto free;
+ switch (rec.magic) {
+ case TDB_MAGIC:
+ case TDB_DEAD_MAGIC:
+ if (!tdb_check_used_record(tdb, off, &rec, hashes,
+ check, private_data))
+ goto free;
+ break;
+ case TDB_FREE_MAGIC:
+ if (!tdb_check_free_record(tdb, off, &rec, hashes))
+ goto free;
+ break;
+ /* If we crash after ftruncate, we can get zeroes or fill. */
+ case TDB_RECOVERY_INVALID_MAGIC:
+ case 0x42424242:
+ if (recovery_start == off) {
+ found_recovery = true;
+ break;
+ }
+ dead = tdb_dead_space(tdb, off);
+ if (dead < sizeof(rec))
+ goto corrupt;
+
+ TDB_LOG((tdb, TDB_DEBUG_ERROR,
+ "Dead space at %u-%u (of %u)\n",
+ off, off + dead, tdb->map_size));
+ rec.rec_len = dead - sizeof(rec);
+ break;
+ case TDB_RECOVERY_MAGIC:
+ if (recovery_start != off) {
+ TDB_LOG((tdb, TDB_DEBUG_ERROR,
+ "Unexpected recovery record at offset %u\n",
+ off));
+ goto free;
+ }
+ found_recovery = true;
+ break;
+ default: ;
+ corrupt:
+ tdb->ecode = TDB_ERR_CORRUPT;
+ TDB_LOG((tdb, TDB_DEBUG_ERROR,
+ "Bad magic 0x%x at offset %u\n",
+ rec.magic, off));
+ goto free;
+ }
+ }
+
+ /* Now, hashes should all be empty: each record exists and is referred
+ * to by one other. */
+ for (h = 0; h < 1+tdb->hash_size; h++) {
+ unsigned int i;
+ for (i = 0; i < BITMAP_BITS / CHAR_BIT; i++) {
+ if (hashes[h][i] != 0) {
+ tdb->ecode = TDB_ERR_CORRUPT;
+ TDB_LOG((tdb, TDB_DEBUG_ERROR,
+ "Hashes do not match records\n"));
+ goto free;
+ }
+ }
+ }
+
+ /* We must have found recovery area if there was one. */
+ if (recovery_start != 0 && !found_recovery) {
+ TDB_LOG((tdb, TDB_DEBUG_ERROR,
+ "Expected a recovery area at %u\n",
+ recovery_start));
+ goto free;
+ }
+
+ free(hashes);
+ if (locked) {
+ tdb_unlockall_read(tdb);
+ }
+ return 0;
+
+free:
+ free(hashes);
+unlock:
+ if (locked) {
+ tdb_unlockall_read(tdb);
+ }
+ return -1;
+}
diff --git a/common/dump.c b/common/dump.c
new file mode 100644
index 0000000..5f6a78b
--- /dev/null
+++ b/common/dump.c
@@ -0,0 +1,136 @@
+ /*
+ Unix SMB/CIFS implementation.
+
+ trivial database library
+
+ Copyright (C) Andrew Tridgell 1999-2005
+ Copyright (C) Paul `Rusty' Russell 2000
+ Copyright (C) Jeremy Allison 2000-2003
+
+ ** NOTE! The following LGPL license applies to the tdb
+ ** library. This does NOT imply that all of Samba is released
+ ** under the LGPL
+
+ This library is free software; you can redistribute it and/or
+ modify it under the terms of the GNU Lesser General Public
+ License as published by the Free Software Foundation; either
+ version 3 of the License, or (at your option) any later version.
+
+ This library is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ Lesser General Public License for more details.
+
+ You should have received a copy of the GNU Lesser General Public
+ License along with this library; if not, see <http://www.gnu.org/licenses/>.
+*/
+
+#include "tdb_private.h"
+
+static tdb_off_t tdb_dump_record(struct tdb_context *tdb, int hash,
+ tdb_off_t offset)
+{
+ struct tdb_record rec;
+ tdb_off_t tailer_ofs, tailer;
+
+ if (tdb->methods->tdb_read(tdb, offset, (char *)&rec,
+ sizeof(rec), DOCONV()) == -1) {
+ printf("ERROR: failed to read record at %u\n", offset);
+ return 0;
+ }
+
+ printf(" rec: hash=%d offset=0x%08x next=0x%08x rec_len=%u "
+ "key_len=%u data_len=%u full_hash=0x%08x magic=0x%08x\n",
+ hash, offset, rec.next, rec.rec_len, rec.key_len, rec.data_len,
+ rec.full_hash, rec.magic);
+
+ tailer_ofs = offset + sizeof(rec) + rec.rec_len - sizeof(tdb_off_t);
+
+ if (tdb_ofs_read(tdb, tailer_ofs, &tailer) == -1) {
+ printf("ERROR: failed to read tailer at %u\n", tailer_ofs);
+ return rec.next;
+ }
+
+ if (tailer != rec.rec_len + sizeof(rec)) {
+ printf("ERROR: tailer does not match record! tailer=%u totalsize=%u\n",
+ (unsigned int)tailer, (unsigned int)(rec.rec_len + sizeof(rec)));
+ }
+ return rec.next;
+}
+
+static int tdb_dump_chain(struct tdb_context *tdb, int i)
+{
+ tdb_off_t rec_ptr, top;
+
+ top = TDB_HASH_TOP(i);
+
+ if (tdb_lock(tdb, i, F_WRLCK) != 0)
+ return -1;
+
+ if (tdb_ofs_read(tdb, top, &rec_ptr) == -1)
+ return tdb_unlock(tdb, i, F_WRLCK);
+
+ if (rec_ptr)
+ printf("hash=%d\n", i);
+
+ while (rec_ptr) {
+ rec_ptr = tdb_dump_record(tdb, i, rec_ptr);
+ }
+
+ return tdb_unlock(tdb, i, F_WRLCK);
+}
+
+_PUBLIC_ void tdb_dump_all(struct tdb_context *tdb)
+{
+ int i;
+ for (i=0;i<tdb->hash_size;i++) {
+ tdb_dump_chain(tdb, i);
+ }
+ printf("freelist:\n");
+ tdb_dump_chain(tdb, -1);
+}
+
+_PUBLIC_ int tdb_printfreelist(struct tdb_context *tdb)
+{
+ int ret;
+ long total_free = 0;
+ tdb_off_t offset, rec_ptr;
+ struct tdb_record rec;
+
+ if ((ret = tdb_lock(tdb, -1, F_WRLCK)) != 0)
+ return ret;
+
+ offset = FREELIST_TOP;
+
+ /* read in the freelist top */
+ if (tdb_ofs_read(tdb, offset, &rec_ptr) == -1) {
+ tdb_unlock(tdb, -1, F_WRLCK);
+ return 0;
+ }
+
+ printf("freelist top=[0x%08x]\n", rec_ptr );
+ while (rec_ptr) {
+ if (tdb->methods->tdb_read(tdb, rec_ptr, (char *)&rec,
+ sizeof(rec), DOCONV()) == -1) {
+ tdb_unlock(tdb, -1, F_WRLCK);
+ return -1;
+ }
+
+ if (rec.magic != TDB_FREE_MAGIC) {
+ printf("bad magic 0x%08x in free list\n", rec.magic);
+ tdb_unlock(tdb, -1, F_WRLCK);
+ return -1;
+ }
+
+ printf("entry offset=[0x%08x], rec.rec_len = [0x%08x (%u)] (end = 0x%08x)\n",
+ rec_ptr, rec.rec_len, rec.rec_len, rec_ptr + rec.rec_len);
+ total_free += rec.rec_len;
+
+ /* move to the next record */
+ rec_ptr = rec.next;
+ }
+ printf("total rec_len = [0x%08lx (%lu)]\n", total_free, total_free);
+
+ return tdb_unlock(tdb, -1, F_WRLCK);
+}
+
diff --git a/common/error.c b/common/error.c
new file mode 100644
index 0000000..478eb88
--- /dev/null
+++ b/common/error.c
@@ -0,0 +1,57 @@
+ /*
+ Unix SMB/CIFS implementation.
+
+ trivial database library
+
+ Copyright (C) Andrew Tridgell 1999-2005
+ Copyright (C) Paul `Rusty' Russell 2000
+ Copyright (C) Jeremy Allison 2000-2003
+
+ ** NOTE! The following LGPL license applies to the tdb
+ ** library. This does NOT imply that all of Samba is released
+ ** under the LGPL
+
+ This library is free software; you can redistribute it and/or
+ modify it under the terms of the GNU Lesser General Public
+ License as published by the Free Software Foundation; either
+ version 3 of the License, or (at your option) any later version.
+
+ This library is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ Lesser General Public License for more details.
+
+ You should have received a copy of the GNU Lesser General Public
+ License along with this library; if not, see <http://www.gnu.org/licenses/>.
+*/
+
+#include "tdb_private.h"
+
+_PUBLIC_ enum TDB_ERROR tdb_error(struct tdb_context *tdb)
+{
+ return tdb->ecode;
+}
+
+static struct tdb_errname {
+ enum TDB_ERROR ecode; const char *estring;
+} emap[] = { {TDB_SUCCESS, "Success"},
+ {TDB_ERR_CORRUPT, "Corrupt database"},
+ {TDB_ERR_IO, "IO Error"},
+ {TDB_ERR_LOCK, "Locking error"},
+ {TDB_ERR_OOM, "Out of memory"},
+ {TDB_ERR_EXISTS, "Record exists"},
+ {TDB_ERR_NOLOCK, "Lock exists on other keys"},
+ {TDB_ERR_EINVAL, "Invalid parameter"},
+ {TDB_ERR_NOEXIST, "Record does not exist"},
+ {TDB_ERR_RDONLY, "write not permitted"} };
+
+/* Error string for the last tdb error */
+_PUBLIC_ const char *tdb_errorstr(struct tdb_context *tdb)
+{
+ uint32_t i;
+ for (i = 0; i < sizeof(emap) / sizeof(struct tdb_errname); i++)
+ if (tdb->ecode == emap[i].ecode)
+ return emap[i].estring;
+ return "Invalid error code";
+}
+
diff --git a/common/freelist.c b/common/freelist.c
new file mode 100644
index 0000000..86fac2f
--- /dev/null
+++ b/common/freelist.c
@@ -0,0 +1,772 @@
+ /*
+ Unix SMB/CIFS implementation.
+
+ trivial database library
+
+ Copyright (C) Andrew Tridgell 1999-2005
+ Copyright (C) Paul `Rusty' Russell 2000
+ Copyright (C) Jeremy Allison 2000-2003
+
+ ** NOTE! The following LGPL license applies to the tdb
+ ** library. This does NOT imply that all of Samba is released
+ ** under the LGPL
+
+ This library is free software; you can redistribute it and/or
+ modify it under the terms of the GNU Lesser General Public
+ License as published by the Free Software Foundation; either
+ version 3 of the License, or (at your option) any later version.
+
+ This library is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ Lesser General Public License for more details.
+
+ You should have received a copy of the GNU Lesser General Public
+ License along with this library; if not, see <http://www.gnu.org/licenses/>.
+*/
+
+#include "tdb_private.h"
+
+/* 'right' merges can involve O(n^2) cost when combined with a
+ traverse, so they are disabled until we find a way to do them in
+ O(1) time
+*/
+#define USE_RIGHT_MERGES 0
+
+/* read a freelist record and check for simple errors */
+int tdb_rec_free_read(struct tdb_context *tdb, tdb_off_t off, struct tdb_record *rec)
+{
+ if (tdb->methods->tdb_read(tdb, off, rec, sizeof(*rec),DOCONV()) == -1)
+ return -1;
+
+ if (rec->magic == TDB_MAGIC) {
+		/* this happens when an app is shut down while deleting a record - we should
+		   not completely fail when this happens */
+ TDB_LOG((tdb, TDB_DEBUG_WARNING, "tdb_rec_free_read non-free magic 0x%x at offset=%u - fixing\n",
+ rec->magic, off));
+ rec->magic = TDB_FREE_MAGIC;
+ if (tdb_rec_write(tdb, off, rec) == -1)
+ return -1;
+ }
+
+ if (rec->magic != TDB_FREE_MAGIC) {
+ /* Ensure ecode is set for log fn. */
+ tdb->ecode = TDB_ERR_CORRUPT;
+ TDB_LOG((tdb, TDB_DEBUG_WARNING, "tdb_rec_free_read bad magic 0x%x at offset=%u\n",
+ rec->magic, off));
+ return -1;
+ }
+ if (tdb->methods->tdb_oob(tdb, rec->next, sizeof(*rec), 0) != 0)
+ return -1;
+ return 0;
+}
+
+
+#if USE_RIGHT_MERGES
+/* Remove an element from the freelist. Must have alloc lock. */
+static int remove_from_freelist(struct tdb_context *tdb, tdb_off_t off, tdb_off_t next)
+{
+ tdb_off_t last_ptr, i;
+
+ /* read in the freelist top */
+ last_ptr = FREELIST_TOP;
+ while (tdb_ofs_read(tdb, last_ptr, &i) != -1 && i != 0) {
+ if (i == off) {
+ /* We've found it! */
+ return tdb_ofs_write(tdb, last_ptr, &next);
+ }
+ /* Follow chain (next offset is at start of record) */
+ last_ptr = i;
+ }
+ tdb->ecode = TDB_ERR_CORRUPT;
+ TDB_LOG((tdb, TDB_DEBUG_FATAL,"remove_from_freelist: not on list at off=%u\n", off));
+ return -1;
+}
+#endif
+
+
+/* update a record tailer (must hold allocation lock) */
+static int update_tailer(struct tdb_context *tdb, tdb_off_t offset,
+ const struct tdb_record *rec)
+{
+ tdb_off_t totalsize;
+
+ /* Offset of tailer from record header */
+ totalsize = sizeof(*rec) + rec->rec_len;
+ return tdb_ofs_write(tdb, offset + totalsize - sizeof(tdb_off_t),
+ &totalsize);
+}
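
A worked example of the tailer arithmetic (assuming the usual 24-byte (0x18) on-disk struct tdb_record and 4-byte tdb_off_t): for a record at offset 0x1000 with rec_len = 0x64, totalsize = 0x18 + 0x64 = 0x7c, so the tailer, which stores 0x7c, lands at 0x1000 + 0x7c - 4 = 0x1078, i.e. the last 4 bytes of the record. A neighbour starting at 0x107c can read those 4 bytes and recover this record's header at 0x107c - 0x7c = 0x1000, which is exactly what read_record_on_left() below does.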
+
+/**
+ * Read the record directly on the left.
+ * Fail if there is no record on the left.
+ */
+static int read_record_on_left(struct tdb_context *tdb, tdb_off_t rec_ptr,
+ tdb_off_t *left_p,
+ struct tdb_record *left_r)
+{
+ tdb_off_t left_ptr;
+ tdb_off_t left_size;
+ struct tdb_record left_rec;
+ int ret;
+
+ left_ptr = rec_ptr - sizeof(tdb_off_t);
+
+ if (left_ptr <= TDB_DATA_START(tdb->hash_size)) {
+ /* no record on the left */
+ return -1;
+ }
+
+ /* Read in tailer and jump back to header */
+ ret = tdb_ofs_read(tdb, left_ptr, &left_size);
+ if (ret == -1) {
+ TDB_LOG((tdb, TDB_DEBUG_FATAL,
+ "tdb_free: left offset read failed at %u\n", left_ptr));
+ return -1;
+ }
+
+ /* it could be uninitialised data */
+ if (left_size == 0 || left_size == TDB_PAD_U32) {
+ return -1;
+ }
+
+ if (left_size > rec_ptr) {
+ return -1;
+ }
+
+ left_ptr = rec_ptr - left_size;
+
+ if (left_ptr < TDB_DATA_START(tdb->hash_size)) {
+ return -1;
+ }
+
+ /* Now read in the left record */
+ ret = tdb->methods->tdb_read(tdb, left_ptr, &left_rec,
+ sizeof(left_rec), DOCONV());
+ if (ret == -1) {
+ TDB_LOG((tdb, TDB_DEBUG_FATAL,
+ "tdb_free: left read failed at %u (%u)\n",
+ left_ptr, left_size));
+ return -1;
+ }
+
+ *left_p = left_ptr;
+ *left_r = left_rec;
+
+ return 0;
+}
+
+/**
+ * Merge new freelist record with the direct left neighbour.
+ * This assumes that left_rec represents the record
+ * directly to the left of right_rec and that this is
+ * a freelist record.
+ */
+static int merge_with_left_record(struct tdb_context *tdb,
+ tdb_off_t left_ptr,
+ struct tdb_record *left_rec,
+ struct tdb_record *right_rec)
+{
+ int ret;
+
+ left_rec->rec_len += sizeof(*right_rec) + right_rec->rec_len;
+
+ ret = tdb_rec_write(tdb, left_ptr, left_rec);
+ if (ret == -1) {
+ TDB_LOG((tdb, TDB_DEBUG_FATAL,
+ "merge_with_left_record: update_left failed at %u\n",
+ left_ptr));
+ return -1;
+ }
+
+ ret = update_tailer(tdb, left_ptr, left_rec);
+ if (ret == -1) {
+ TDB_LOG((tdb, TDB_DEBUG_FATAL,
+ "merge_with_left_record: update_tailer failed at %u\n",
+ left_ptr));
+ return -1;
+ }
+
+ return 0;
+}
+
+/**
+ * Check whether the record left of a given freelist record is
+ * also a freelist record, and if so, merge the two records.
+ *
+ * Return code:
+ * -1 upon error
+ * 0 if left was not a free record
+ * 1 if left was free and successfully merged.
+ *
+ * The current record is handed in as a pointer together with the
+ * fully read record.
+ *
+ * The left record pointer and struct can be retrieved as results
+ * in lp and lr.
+ */
+static int check_merge_with_left_record(struct tdb_context *tdb,
+ tdb_off_t rec_ptr,
+ struct tdb_record *rec,
+ tdb_off_t *lp,
+ struct tdb_record *lr)
+{
+ tdb_off_t left_ptr;
+ struct tdb_record left_rec;
+ int ret;
+
+ ret = read_record_on_left(tdb, rec_ptr, &left_ptr, &left_rec);
+ if (ret != 0) {
+ return 0;
+ }
+
+ if (left_rec.magic != TDB_FREE_MAGIC) {
+ return 0;
+ }
+
+ /* It's free - expand to include it. */
+ ret = merge_with_left_record(tdb, left_ptr, &left_rec, rec);
+ if (ret != 0) {
+ return -1;
+ }
+
+ if (lp != NULL) {
+ *lp = left_ptr;
+ }
+
+ if (lr != NULL) {
+ *lr = left_rec;
+ }
+
+ return 1;
+}
+
+/**
+ * Check whether the record left of a given freelist record is
+ * also a freelist record, and if so, merge the two records.
+ *
+ * Return code:
+ * -1 upon error
+ * 0 if left was not a free record
+ * 1 if left was free and successfully merged.
+ *
+ * In this variant, the input record is specified just as the pointer
+ * and is read from the database if needed.
+ *
+ * next_ptr will contain the original record's next pointer after
+ * successful merging (which will be lost after merging), so that
+ * the caller can update the last pointer.
+ */
+static int check_merge_ptr_with_left_record(struct tdb_context *tdb,
+ tdb_off_t rec_ptr,
+ tdb_off_t *next_ptr)
+{
+ tdb_off_t left_ptr;
+ struct tdb_record rec, left_rec;
+ int ret;
+
+ ret = read_record_on_left(tdb, rec_ptr, &left_ptr, &left_rec);
+ if (ret != 0) {
+ return 0;
+ }
+
+ if (left_rec.magic != TDB_FREE_MAGIC) {
+ return 0;
+ }
+
+ /* It's free - expand to include it. */
+
+ ret = tdb->methods->tdb_read(tdb, rec_ptr, &rec,
+ sizeof(rec), DOCONV());
+ if (ret != 0) {
+ return -1;
+ }
+
+ ret = merge_with_left_record(tdb, left_ptr, &left_rec, &rec);
+ if (ret != 0) {
+ return -1;
+ }
+
+ if (next_ptr != NULL) {
+ *next_ptr = rec.next;
+ }
+
+ return 1;
+}
+
+/**
+ * Add an element into the freelist.
+ *
+ * We merge the new record into the left record if it is also a
+ * free record, but not with the right one. This makes the
+ * operation O(1) instead of O(n): merging with the right record
+ * requires a traverse of the freelist to find the previous
+ * record in the free list.
+ *
+ * This prevents db traverses from being O(n^2) after a lot of deletes.
+ */
+int tdb_free(struct tdb_context *tdb, tdb_off_t offset, struct tdb_record *rec)
+{
+ int ret;
+
+ /* Allocation and tailer lock */
+ if (tdb_lock(tdb, -1, F_WRLCK) != 0)
+ return -1;
+
+ /* set an initial tailer, so if we fail we don't leave a bogus record */
+ if (update_tailer(tdb, offset, rec) != 0) {
+ TDB_LOG((tdb, TDB_DEBUG_FATAL, "tdb_free: update_tailer failed!\n"));
+ goto fail;
+ }
+
+#if USE_RIGHT_MERGES
+ /* Look right first (I'm an Australian, dammit) */
+ if (offset + sizeof(*rec) + rec->rec_len + sizeof(*rec) <= tdb->map_size) {
+ tdb_off_t right = offset + sizeof(*rec) + rec->rec_len;
+ struct tdb_record r;
+
+ if (tdb->methods->tdb_read(tdb, right, &r, sizeof(r), DOCONV()) == -1) {
+ TDB_LOG((tdb, TDB_DEBUG_FATAL, "tdb_free: right read failed at %u\n", right));
+ goto left;
+ }
+
+ /* If it's free, expand to include it. */
+ if (r.magic == TDB_FREE_MAGIC) {
+ if (remove_from_freelist(tdb, right, r.next) == -1) {
+ TDB_LOG((tdb, TDB_DEBUG_FATAL, "tdb_free: right free failed at %u\n", right));
+ goto left;
+ }
+ rec->rec_len += sizeof(r) + r.rec_len;
+ if (update_tailer(tdb, offset, rec) == -1) {
+ TDB_LOG((tdb, TDB_DEBUG_FATAL, "tdb_free: update_tailer failed at %u\n", offset));
+ goto fail;
+ }
+ }
+ }
+left:
+#endif
+
+ ret = check_merge_with_left_record(tdb, offset, rec, NULL, NULL);
+ if (ret == -1) {
+ goto fail;
+ }
+ if (ret == 1) {
+ /* merged */
+ goto done;
+ }
+
+ /* Nothing to merge, prepend to free list */
+
+ rec->magic = TDB_FREE_MAGIC;
+
+ if (tdb_ofs_read(tdb, FREELIST_TOP, &rec->next) == -1 ||
+ tdb_rec_write(tdb, offset, rec) == -1 ||
+ tdb_ofs_write(tdb, FREELIST_TOP, &offset) == -1) {
+ TDB_LOG((tdb, TDB_DEBUG_FATAL, "tdb_free record write failed at offset=%u\n", offset));
+ goto fail;
+ }
+
+done:
+ /* And we're done. */
+ tdb_unlock(tdb, -1, F_WRLCK);
+ return 0;
+
+ fail:
+ tdb_unlock(tdb, -1, F_WRLCK);
+ return -1;
+}
+
+
+
+/*
+ the core of tdb_allocate - called when we have decided which
+ free list entry to use
+
+ Note that we try to allocate by grabbing data from the end of an existing record,
+ not the beginning. This is so the left merge in a free is more likely to be
+ able to free up the record without fragmentation
+ */
+static tdb_off_t tdb_allocate_ofs(struct tdb_context *tdb,
+ tdb_len_t length, tdb_off_t rec_ptr,
+ struct tdb_record *rec, tdb_off_t last_ptr)
+{
+#define MIN_REC_SIZE (sizeof(struct tdb_record) + sizeof(tdb_off_t) + 8)
+
+ if (rec->rec_len < length + MIN_REC_SIZE) {
+ /* we have to grab the whole record */
+
+ /* unlink it from the previous record */
+ if (tdb_ofs_write(tdb, last_ptr, &rec->next) == -1) {
+ return 0;
+ }
+
+ /* mark it not free */
+ rec->magic = TDB_MAGIC;
+ if (tdb_rec_write(tdb, rec_ptr, rec) == -1) {
+ return 0;
+ }
+ return rec_ptr;
+ }
+
+ /* we're going to just shorten the existing record */
+ rec->rec_len -= (length + sizeof(*rec));
+ if (tdb_rec_write(tdb, rec_ptr, rec) == -1) {
+ return 0;
+ }
+ if (update_tailer(tdb, rec_ptr, rec) == -1) {
+ return 0;
+ }
+
+ /* and setup the new record */
+ rec_ptr += sizeof(*rec) + rec->rec_len;
+
+ memset(rec, '\0', sizeof(*rec));
+ rec->rec_len = length;
+ rec->magic = TDB_MAGIC;
+
+ if (tdb_rec_write(tdb, rec_ptr, rec) == -1) {
+ return 0;
+ }
+
+ if (update_tailer(tdb, rec_ptr, rec) == -1) {
+ return 0;
+ }
+
+ return rec_ptr;
+}
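
Worked through with the same assumed 24-byte header: take a free record at rec_ptr = 0x1000 with rec_len = 0x200 and a request of length = 0x80 (tailer and alignment already included by the caller). MIN_REC_SIZE is 0x18 + 4 + 8 = 0x24, and 0x200 >= 0x80 + 0x24, so the record is split rather than consumed whole: the free record shrinks to rec_len = 0x200 - (0x80 + 0x18) = 0x168, and the new record starts at 0x1000 + 0x18 + 0x168 = 0x1180 with rec_len = 0x80. Its data ends at 0x1180 + 0x18 + 0x80 = 0x1218, matching the end of the original record (0x1000 + 0x18 + 0x200), so the allocation indeed comes off the tail.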
+
+/* allocate some space from the free list. The offset returned points
+   to an unconnected tdb_record within the database with room for at
+ least length bytes of total data
+
+ 0 is returned if the space could not be allocated
+ */
+static tdb_off_t tdb_allocate_from_freelist(
+ struct tdb_context *tdb, tdb_len_t length, struct tdb_record *rec)
+{
+ tdb_off_t rec_ptr, last_ptr, newrec_ptr;
+ struct {
+ tdb_off_t rec_ptr, last_ptr;
+ tdb_len_t rec_len;
+ } bestfit;
+ float multiplier = 1.0;
+ bool merge_created_candidate;
+
+ /* over-allocate to reduce fragmentation */
+ length *= 1.25;
+
+ /* Extra bytes required for tailer */
+ length += sizeof(tdb_off_t);
+ length = TDB_ALIGN(length, TDB_ALIGNMENT);
+
+ again:
+ merge_created_candidate = false;
+ last_ptr = FREELIST_TOP;
+
+ /* read in the freelist top */
+ if (tdb_ofs_read(tdb, FREELIST_TOP, &rec_ptr) == -1)
+ return 0;
+
+ bestfit.rec_ptr = 0;
+ bestfit.last_ptr = 0;
+ bestfit.rec_len = 0;
+
+ /*
+ this is a best fit allocation strategy. Originally we used
+ a first fit strategy, but it suffered from massive fragmentation
+ issues when faced with a slowly increasing record size.
+ */
+ while (rec_ptr) {
+ int ret;
+ tdb_off_t left_ptr;
+ struct tdb_record left_rec;
+
+ if (tdb_rec_free_read(tdb, rec_ptr, rec) == -1) {
+ return 0;
+ }
+
+ ret = check_merge_with_left_record(tdb, rec_ptr, rec,
+ &left_ptr, &left_rec);
+ if (ret == -1) {
+ return 0;
+ }
+ if (ret == 1) {
+ /* merged */
+ rec_ptr = rec->next;
+ ret = tdb_ofs_write(tdb, last_ptr, &rec->next);
+ if (ret == -1) {
+ return 0;
+ }
+
+ /*
+ * We have merged the current record into the left
+ * neighbour. So our traverse of the freelist will
+ * skip it and consider the next record in the chain.
+ *
+ * But the enlarged left neighbour may be a candidate.
+ * If it is, we can not directly use it, though.
+ * The only thing we can do and have to do here is to
+ * update the current best fit size in the chain if the
+ * current best fit is the left record. (By that we may
+			 * worsen the best fit we already had, but this is not a
+ * problem.)
+ *
+ * If the current best fit is not the left record,
+ * all we can do is remember the fact that a merge
+ * created a new candidate so that we can trigger
+ * a second walk of the freelist if at the end of
+ * the first walk we have not found any fit.
+ * This way we can avoid expanding the database.
+ */
+
+ if (bestfit.rec_ptr == left_ptr) {
+ bestfit.rec_len = left_rec.rec_len;
+ }
+
+ if (left_rec.rec_len > length) {
+ merge_created_candidate = true;
+ }
+
+ continue;
+ }
+
+ if (rec->rec_len >= length) {
+ if (bestfit.rec_ptr == 0 ||
+ rec->rec_len < bestfit.rec_len) {
+ bestfit.rec_len = rec->rec_len;
+ bestfit.rec_ptr = rec_ptr;
+ bestfit.last_ptr = last_ptr;
+ }
+ }
+
+ /* move to the next record */
+ last_ptr = rec_ptr;
+ rec_ptr = rec->next;
+
+ /* if we've found a record that is big enough, then
+		   stop searching if it's also not too big. The
+ definition of 'too big' changes as we scan
+ through */
+ if (bestfit.rec_len > 0 &&
+ bestfit.rec_len < length * multiplier) {
+ break;
+ }
+
+ /* this multiplier means we only extremely rarely
+ search more than 50 or so records. At 50 records we
+ accept records up to 11 times larger than what we
+ want */
+ multiplier *= 1.05;
+ }
+
+ if (bestfit.rec_ptr != 0) {
+ if (tdb_rec_free_read(tdb, bestfit.rec_ptr, rec) == -1) {
+ return 0;
+ }
+
+ newrec_ptr = tdb_allocate_ofs(tdb, length, bestfit.rec_ptr,
+ rec, bestfit.last_ptr);
+ return newrec_ptr;
+ }
+
+ if (merge_created_candidate) {
+ goto again;
+ }
+
+ /* we didn't find enough space. See if we can expand the
+ database and if we can then try again */
+ if (tdb_expand(tdb, length + sizeof(*rec)) == 0)
+ goto again;
+
+ return 0;
+}
+
+static bool tdb_alloc_dead(
+ struct tdb_context *tdb, int hash, tdb_len_t length,
+ tdb_off_t *rec_ptr, struct tdb_record *rec)
+{
+ tdb_off_t last_ptr;
+
+ *rec_ptr = tdb_find_dead(tdb, hash, rec, length, &last_ptr);
+ if (*rec_ptr == 0) {
+ return false;
+ }
+ /*
+ * Unlink the record from the hash chain, it's about to be moved into
+ * another one.
+ */
+ return (tdb_ofs_write(tdb, last_ptr, &rec->next) == 0);
+}
+
+/*
+ * Chain "hash" is assumed to be locked
+ */
+
+tdb_off_t tdb_allocate(struct tdb_context *tdb, int hash, tdb_len_t length,
+ struct tdb_record *rec)
+{
+ tdb_off_t ret;
+ int i;
+
+ if (tdb->max_dead_records == 0) {
+ /*
+ * No dead records to expect anywhere. Do the blocking
+ * freelist lock without trying to steal from others
+ */
+ goto blocking_freelist_allocate;
+ }
+
+ /*
+ * The following loop tries to get the freelist lock nonblocking. If
+ * it gets the lock, allocate from there. If the freelist is busy,
+ * instead of waiting we try to steal dead records from other hash
+ * chains.
+ *
+ * Be aware that we do nonblocking locks on the other hash chains as
+ * well and fail gracefully. This way we avoid deadlocks (we block two
+ * hash chains, something which is pretty bad normally)
+ */
+
+ for (i=0; i<tdb->hash_size; i++) {
+
+ int list;
+
+ list = BUCKET(hash+i);
+
+ if (tdb_lock_nonblock(tdb, list, F_WRLCK) == 0) {
+ bool got_dead;
+
+ got_dead = tdb_alloc_dead(tdb, list, length, &ret, rec);
+ tdb_unlock(tdb, list, F_WRLCK);
+
+ if (got_dead) {
+ return ret;
+ }
+ }
+
+ if (tdb_lock_nonblock(tdb, -1, F_WRLCK) == 0) {
+ /*
+ * Under the freelist lock take the chance to give
+ * back our dead records.
+ */
+ tdb_purge_dead(tdb, hash);
+
+ ret = tdb_allocate_from_freelist(tdb, length, rec);
+ tdb_unlock(tdb, -1, F_WRLCK);
+ return ret;
+ }
+ }
+
+blocking_freelist_allocate:
+
+ if (tdb_lock(tdb, -1, F_WRLCK) == -1) {
+ return 0;
+ }
+ ret = tdb_allocate_from_freelist(tdb, length, rec);
+ tdb_unlock(tdb, -1, F_WRLCK);
+ return ret;
+}
+
+/**
+ * Merge adjacent records in the freelist.
+ */
+static int tdb_freelist_merge_adjacent(struct tdb_context *tdb,
+ int *count_records, int *count_merged)
+{
+ tdb_off_t cur, next;
+ int count = 0;
+ int merged = 0;
+ int ret;
+
+ ret = tdb_lock(tdb, -1, F_RDLCK);
+ if (ret == -1) {
+ return -1;
+ }
+
+ cur = FREELIST_TOP;
+ while (tdb_ofs_read(tdb, cur, &next) == 0 && next != 0) {
+ tdb_off_t next2;
+
+ count++;
+
+ ret = check_merge_ptr_with_left_record(tdb, next, &next2);
+ if (ret == -1) {
+ goto done;
+ }
+ if (ret == 1) {
+ /*
+ * merged:
+ * now let cur->next point to next2 instead of next
+ */
+
+ ret = tdb_ofs_write(tdb, cur, &next2);
+ if (ret != 0) {
+ goto done;
+ }
+
+ next = next2;
+ merged++;
+ }
+
+ cur = next;
+ }
+
+ if (count_records != NULL) {
+ *count_records = count;
+ }
+
+ if (count_merged != NULL) {
+ *count_merged = merged;
+ }
+
+ ret = 0;
+
+done:
+ tdb_unlock(tdb, -1, F_RDLCK);
+ return ret;
+}
+
+/**
+ * return the size of the freelist - no merging done
+ */
+static int tdb_freelist_size_no_merge(struct tdb_context *tdb)
+{
+ tdb_off_t ptr;
+ int count=0;
+
+ if (tdb_lock(tdb, -1, F_RDLCK) == -1) {
+ return -1;
+ }
+
+ ptr = FREELIST_TOP;
+ while (tdb_ofs_read(tdb, ptr, &ptr) == 0 && ptr != 0) {
+ count++;
+ }
+
+ tdb_unlock(tdb, -1, F_RDLCK);
+ return count;
+}
+
+/**
+ * return the size of the freelist - used to decide if we should repack
+ *
+ * As a side effect, adjacent records are merged unless the
+ * database is read-only, in order to reduce the fragmentation
+ * without repacking.
+ */
+_PUBLIC_ int tdb_freelist_size(struct tdb_context *tdb)
+{
+
+ int count = 0;
+
+ if (tdb->read_only) {
+ count = tdb_freelist_size_no_merge(tdb);
+ } else {
+ int ret;
+ ret = tdb_freelist_merge_adjacent(tdb, &count, NULL);
+ if (ret != 0) {
+ return -1;
+ }
+ }
+
+ return count;
+}
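
For illustration, a hedged sketch of how a caller might act on this (the threshold of 10000 entries is an arbitrary example value; tdb_repack() is the public repacking entry point):

    int freelist_entries = tdb_freelist_size(tdb);

    if (freelist_entries > 10000) {
        /* heavily fragmented: rewrite the database compactly */
        if (tdb_repack(tdb) != 0) {
                fprintf(stderr, "repack failed: %s\n", tdb_errorstr(tdb));
        }
    }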
diff --git a/common/freelistcheck.c b/common/freelistcheck.c
new file mode 100644
index 0000000..c6bfeaa
--- /dev/null
+++ b/common/freelistcheck.c
@@ -0,0 +1,108 @@
+/*
+ Unix SMB/CIFS implementation.
+
+ trivial database library
+
+ Copyright (C) Jeremy Allison 2006
+
+ ** NOTE! The following LGPL license applies to the tdb
+ ** library. This does NOT imply that all of Samba is released
+ ** under the LGPL
+
+ This library is free software; you can redistribute it and/or
+ modify it under the terms of the GNU Lesser General Public
+ License as published by the Free Software Foundation; either
+ version 3 of the License, or (at your option) any later version.
+
+ This library is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ Lesser General Public License for more details.
+
+ You should have received a copy of the GNU Lesser General Public
+ License along with this library; if not, see <http://www.gnu.org/licenses/>.
+*/
+
+#include "tdb_private.h"
+
+/* Check the freelist is good and contains no loops.
+ Very memory intensive - only do this as a consistency
+ checker. Heh heh - uses an in memory tdb as the storage
+ for the "seen" record list. For some reason this strikes
+ me as extremely clever as I don't have to write another tree
+ data structure implementation :-).
+ */
+
+static int seen_insert(struct tdb_context *mem_tdb, tdb_off_t rec_ptr)
+{
+ TDB_DATA key;
+
+ key.dptr = (unsigned char *)&rec_ptr;
+ key.dsize = sizeof(rec_ptr);
+ return tdb_store(mem_tdb, key, tdb_null, TDB_INSERT);
+}
+
+_PUBLIC_ int tdb_validate_freelist(struct tdb_context *tdb, int *pnum_entries)
+{
+ struct tdb_context *mem_tdb = NULL;
+ struct tdb_record rec;
+ tdb_off_t rec_ptr, last_ptr;
+ int ret = -1;
+
+ *pnum_entries = 0;
+
+ mem_tdb = tdb_open("flval", tdb->hash_size,
+ TDB_INTERNAL, O_RDWR, 0600);
+ if (!mem_tdb) {
+ return -1;
+ }
+
+ if (tdb_lock(tdb, -1, F_WRLCK) == -1) {
+ tdb_close(mem_tdb);
+ return 0;
+ }
+
+ last_ptr = FREELIST_TOP;
+
+ /* Store the FREELIST_TOP record. */
+ if (seen_insert(mem_tdb, last_ptr) == -1) {
+ tdb->ecode = TDB_ERR_CORRUPT;
+ ret = -1;
+ goto fail;
+ }
+
+ /* read in the freelist top */
+ if (tdb_ofs_read(tdb, FREELIST_TOP, &rec_ptr) == -1) {
+ goto fail;
+ }
+
+ while (rec_ptr) {
+
+ /* If we can't store this record (we've seen it
+ before) then the free list has a loop and must
+ be corrupt. */
+
+ if (seen_insert(mem_tdb, rec_ptr)) {
+ tdb->ecode = TDB_ERR_CORRUPT;
+ ret = -1;
+ goto fail;
+ }
+
+ if (tdb_rec_free_read(tdb, rec_ptr, &rec) == -1) {
+ goto fail;
+ }
+
+ /* move to the next record */
+ last_ptr = rec_ptr;
+ rec_ptr = rec.next;
+ *pnum_entries += 1;
+ }
+
+ ret = 0;
+
+ fail:
+
+ tdb_close(mem_tdb);
+ tdb_unlock(tdb, -1, F_WRLCK);
+ return ret;
+}
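
For illustration, a minimal caller sketch (assuming an open context tdb):

    int entries = 0;

    if (tdb_validate_freelist(tdb, &entries) == -1) {
        fprintf(stderr, "freelist is corrupt: %s\n", tdb_errorstr(tdb));
    } else {
        printf("freelist ok: %d entries\n", entries);
    }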
diff --git a/common/hash.c b/common/hash.c
new file mode 100644
index 0000000..1eed722
--- /dev/null
+++ b/common/hash.c
@@ -0,0 +1,345 @@
+ /*
+ Unix SMB/CIFS implementation.
+
+ trivial database library
+
+ Copyright (C) Rusty Russell 2010
+
+ ** NOTE! The following LGPL license applies to the tdb
+ ** library. This does NOT imply that all of Samba is released
+ ** under the LGPL
+
+ This library is free software; you can redistribute it and/or
+ modify it under the terms of the GNU Lesser General Public
+ License as published by the Free Software Foundation; either
+ version 3 of the License, or (at your option) any later version.
+
+ This library is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ Lesser General Public License for more details.
+
+ You should have received a copy of the GNU Lesser General Public
+ License along with this library; if not, see <http://www.gnu.org/licenses/>.
+*/
+#include "tdb_private.h"
+
+/* This is based on the hash algorithm from gdbm */
+unsigned int tdb_old_hash(TDB_DATA *key)
+{
+ uint32_t value; /* Used to compute the hash value. */
+ uint32_t i; /* Used to cycle through random values. */
+
+ /* Set the initial value from the key size. */
+ for (value = 0x238F13AF * key->dsize, i=0; i < key->dsize; i++)
+ value = (value + (key->dptr[i] << (i*5 % 24)));
+
+ return (1103515243 * value + 12345);
+}
+
+#ifndef WORDS_BIGENDIAN
+# define HASH_LITTLE_ENDIAN 1
+# define HASH_BIG_ENDIAN 0
+#else
+# define HASH_LITTLE_ENDIAN 0
+# define HASH_BIG_ENDIAN 1
+#endif
+
+/*
+-------------------------------------------------------------------------------
+lookup3.c, by Bob Jenkins, May 2006, Public Domain.
+
+These are functions for producing 32-bit hashes for hash table lookup.
+hash_word(), hashlittle(), hashlittle2(), hashbig(), mix(), and final()
+are externally useful functions. Routines to test the hash are included
+if SELF_TEST is defined. You can use this free for any purpose. It's in
+the public domain. It has no warranty.
+
+You probably want to use hashlittle(). hashlittle() and hashbig()
+hash byte arrays. hashlittle() is faster than hashbig() on
+little-endian machines. Intel and AMD are little-endian machines.
+On second thought, you probably want hashlittle2(), which is identical to
+hashlittle() except it returns two 32-bit hashes for the price of one.
+You could implement hashbig2() if you wanted but I haven't bothered here.
+
+If you want to find a hash of, say, exactly 7 integers, do
+ a = i1; b = i2; c = i3;
+ mix(a,b,c);
+ a += i4; b += i5; c += i6;
+ mix(a,b,c);
+ a += i7;
+ final(a,b,c);
+then use c as the hash value. If you have a variable length array of
+4-byte integers to hash, use hash_word(). If you have a byte array (like
+a character string), use hashlittle(). If you have several byte arrays, or
+a mix of things, see the comments above hashlittle().
+
+Why is this so big? I read 12 bytes at a time into 3 4-byte integers,
+then mix those integers. This is fast (you can do a lot more thorough
+mixing with 12*3 instructions on 3 integers than you can with 3 instructions
+on 1 byte), but shoehorning those bytes into integers efficiently is messy.
+*/
+
+#define hashsize(n) ((uint32_t)1<<(n))
+#define hashmask(n) (hashsize(n)-1)
+#define rot(x,k) (((x)<<(k)) | ((x)>>(32-(k))))
+
+/*
+-------------------------------------------------------------------------------
+mix -- mix 3 32-bit values reversibly.
+
+This is reversible, so any information in (a,b,c) before mix() is
+still in (a,b,c) after mix().
+
+If four pairs of (a,b,c) inputs are run through mix(), or through
+mix() in reverse, there are at least 32 bits of the output that
+are sometimes the same for one pair and different for another pair.
+This was tested for:
+* pairs that differed by one bit, by two bits, in any combination
+ of top bits of (a,b,c), or in any combination of bottom bits of
+ (a,b,c).
+* "differ" is defined as +, -, ^, or ~^. For + and -, I transformed
+ the output delta to a Gray code (a^(a>>1)) so a string of 1's (as
+  is commonly produced by subtraction) looks like a single 1-bit
+ difference.
+* the base values were pseudorandom, all zero but one bit set, or
+ all zero plus a counter that starts at zero.
+
+Some k values for my "a-=c; a^=rot(c,k); c+=b;" arrangement that
+satisfy this are
+ 4 6 8 16 19 4
+ 9 15 3 18 27 15
+ 14 9 3 7 17 3
+Well, "9 15 3 18 27 15" didn't quite get 32 bits diffing
+for "differ" defined as + with a one-bit base and a two-bit delta. I
+used http://burtleburtle.net/bob/hash/avalanche.html to choose
+the operations, constants, and arrangements of the variables.
+
+This does not achieve avalanche. There are input bits of (a,b,c)
+that fail to affect some output bits of (a,b,c), especially of a. The
+most thoroughly mixed value is c, but it doesn't really even achieve
+avalanche in c.
+
+This allows some parallelism. Read-after-writes are good at doubling
+the number of bits affected, so the goal of mixing pulls in the opposite
+direction as the goal of parallelism. I did what I could. Rotates
+seem to cost as much as shifts on every machine I could lay my hands
+on, and rotates are much kinder to the top and bottom bits, so I used
+rotates.
+-------------------------------------------------------------------------------
+*/
+#define mix(a,b,c) \
+{ \
+ a -= c; a ^= rot(c, 4); c += b; \
+ b -= a; b ^= rot(a, 6); a += c; \
+ c -= b; c ^= rot(b, 8); b += a; \
+ a -= c; a ^= rot(c,16); c += b; \
+ b -= a; b ^= rot(a,19); a += c; \
+ c -= b; c ^= rot(b, 4); b += a; \
+}
+
+/*
+-------------------------------------------------------------------------------
+final -- final mixing of 3 32-bit values (a,b,c) into c
+
+Pairs of (a,b,c) values differing in only a few bits will usually
+produce values of c that look totally different. This was tested for
+* pairs that differed by one bit, by two bits, in any combination
+ of top bits of (a,b,c), or in any combination of bottom bits of
+ (a,b,c).
+* "differ" is defined as +, -, ^, or ~^. For + and -, I transformed
+ the output delta to a Gray code (a^(a>>1)) so a string of 1's (as
+ is commonly produced by subtraction) look like a single 1-bit
+ difference.
+* the base values were pseudorandom, all zero but one bit set, or
+ all zero plus a counter that starts at zero.
+
+These constants passed:
+ 14 11 25 16 4 14 24
+ 12 14 25 16 4 14 24
+and these came close:
+ 4 8 15 26 3 22 24
+ 10 8 15 26 3 22 24
+ 11 8 15 26 3 22 24
+-------------------------------------------------------------------------------
+*/
+#define final(a,b,c) \
+{ \
+ c ^= b; c -= rot(b,14); \
+ a ^= c; a -= rot(c,11); \
+ b ^= a; b -= rot(a,25); \
+ c ^= b; c -= rot(b,16); \
+ a ^= c; a -= rot(c,4); \
+ b ^= a; b -= rot(a,14); \
+ c ^= b; c -= rot(b,24); \
+}
+
+
+/*
+-------------------------------------------------------------------------------
+hashlittle() -- hash a variable-length key into a 32-bit value
+ k : the key (the unaligned variable-length array of bytes)
+ length : the length of the key, counting by bytes
+ val2 : IN: can be any 4-byte value OUT: second 32 bit hash.
+Returns a 32-bit value. Every bit of the key affects every bit of
+the return value. Two keys differing by one or two bits will have
+totally different hash values. Note that the return value is better
+mixed than val2, so use that first.
+
+The best hash table sizes are powers of 2. There is no need to do
+mod a prime (mod is sooo slow!). If you need less than 32 bits,
+use a bitmask. For example, if you need only 10 bits, do
+ h = (h & hashmask(10));
+In which case, the hash table should have hashsize(10) elements.
+
+If you are hashing n strings (uint8_t **)k, do it like this:
+ for (i=0, h=0; i<n; ++i) h = hashlittle( k[i], len[i], h);
+
+By Bob Jenkins, 2006. bob_jenkins@burtleburtle.net. You may use this
+code any way you wish, private, educational, or commercial. It's free.
+
+Use for hash table lookup, or anything where one collision in 2^^32 is
+acceptable. Do NOT use for cryptographic purposes.
+-------------------------------------------------------------------------------
+*/
+
+static uint32_t hashlittle( const void *key, size_t length )
+{
+ uint32_t a,b,c; /* internal state */
+ union { const void *ptr; size_t i; } u; /* needed for Mac Powerbook G4 */
+
+ /* Set up the internal state */
+ a = b = c = 0xdeadbeef + ((uint32_t)length);
+
+ u.ptr = key;
+ if (HASH_LITTLE_ENDIAN && ((u.i & 0x3) == 0)) {
+ const uint32_t *k = (const uint32_t *)key; /* read 32-bit chunks */
+ const uint8_t *k8;
+
+ /*------ all but last block: aligned reads and affect 32 bits of (a,b,c) */
+ while (length > 12)
+ {
+ a += k[0];
+ b += k[1];
+ c += k[2];
+ mix(a,b,c);
+ length -= 12;
+ k += 3;
+ }
+
+ /*----------------------------- handle the last (probably partial) block */
+ k8 = (const uint8_t *)k;
+ switch(length)
+ {
+ case 12: c+=k[2]; b+=k[1]; a+=k[0]; break;
+ case 11: c+=((uint32_t)k8[10])<<16; /* fall through */
+ case 10: c+=((uint32_t)k8[9])<<8; /* fall through */
+ case 9 : c+=k8[8]; /* fall through */
+ case 8 : b+=k[1]; a+=k[0]; break;
+ case 7 : b+=((uint32_t)k8[6])<<16; /* fall through */
+ case 6 : b+=((uint32_t)k8[5])<<8; /* fall through */
+ case 5 : b+=k8[4]; /* fall through */
+ case 4 : a+=k[0]; break;
+ case 3 : a+=((uint32_t)k8[2])<<16; /* fall through */
+ case 2 : a+=((uint32_t)k8[1])<<8; /* fall through */
+ case 1 : a+=k8[0]; break;
+ case 0 : return c;
+ }
+ } else if (HASH_LITTLE_ENDIAN && ((u.i & 0x1) == 0)) {
+ const uint16_t *k = (const uint16_t *)key; /* read 16-bit chunks */
+ const uint8_t *k8;
+
+ /*--------------- all but last block: aligned reads and different mixing */
+ while (length > 12)
+ {
+ a += k[0] + (((uint32_t)k[1])<<16);
+ b += k[2] + (((uint32_t)k[3])<<16);
+ c += k[4] + (((uint32_t)k[5])<<16);
+ mix(a,b,c);
+ length -= 12;
+ k += 6;
+ }
+
+ /*----------------------------- handle the last (probably partial) block */
+ k8 = (const uint8_t *)k;
+ switch(length)
+ {
+ case 12: c+=k[4]+(((uint32_t)k[5])<<16);
+ b+=k[2]+(((uint32_t)k[3])<<16);
+ a+=k[0]+(((uint32_t)k[1])<<16);
+ break;
+ case 11: c+=((uint32_t)k8[10])<<16; /* fall through */
+ case 10: c+=k[4];
+ b+=k[2]+(((uint32_t)k[3])<<16);
+ a+=k[0]+(((uint32_t)k[1])<<16);
+ break;
+ case 9 : c+=k8[8]; /* fall through */
+ case 8 : b+=k[2]+(((uint32_t)k[3])<<16);
+ a+=k[0]+(((uint32_t)k[1])<<16);
+ break;
+ case 7 : b+=((uint32_t)k8[6])<<16; /* fall through */
+ case 6 : b+=k[2];
+ a+=k[0]+(((uint32_t)k[1])<<16);
+ break;
+ case 5 : b+=k8[4]; /* fall through */
+ case 4 : a+=k[0]+(((uint32_t)k[1])<<16);
+ break;
+ case 3 : a+=((uint32_t)k8[2])<<16; /* fall through */
+ case 2 : a+=k[0];
+ break;
+ case 1 : a+=k8[0];
+ break;
+ case 0 : return c; /* zero length requires no mixing */
+ }
+
+ } else { /* need to read the key one byte at a time */
+ const uint8_t *k = (const uint8_t *)key;
+
+ /*--------------- all but the last block: affect some 32 bits of (a,b,c) */
+ while (length > 12)
+ {
+ a += k[0];
+ a += ((uint32_t)k[1])<<8;
+ a += ((uint32_t)k[2])<<16;
+ a += ((uint32_t)k[3])<<24;
+ b += k[4];
+ b += ((uint32_t)k[5])<<8;
+ b += ((uint32_t)k[6])<<16;
+ b += ((uint32_t)k[7])<<24;
+ c += k[8];
+ c += ((uint32_t)k[9])<<8;
+ c += ((uint32_t)k[10])<<16;
+ c += ((uint32_t)k[11])<<24;
+ mix(a,b,c);
+ length -= 12;
+ k += 12;
+ }
+
+ /*-------------------------------- last block: affect all 32 bits of (c) */
+ switch(length) /* all the case statements fall through */
+ {
+ case 12: c+=((uint32_t)k[11])<<24;
+ case 11: c+=((uint32_t)k[10])<<16;
+ case 10: c+=((uint32_t)k[9])<<8;
+ case 9 : c+=k[8];
+ case 8 : b+=((uint32_t)k[7])<<24;
+ case 7 : b+=((uint32_t)k[6])<<16;
+ case 6 : b+=((uint32_t)k[5])<<8;
+ case 5 : b+=k[4];
+ case 4 : a+=((uint32_t)k[3])<<24;
+ case 3 : a+=((uint32_t)k[2])<<16;
+ case 2 : a+=((uint32_t)k[1])<<8;
+ case 1 : a+=k[0];
+ break;
+ case 0 : return c;
+ }
+ }
+
+ final(a,b,c);
+ return c;
+}
+
+_PUBLIC_ unsigned int tdb_jenkins_hash(TDB_DATA *key)
+{
+ return hashlittle(key->dptr, key->dsize);
+}
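
For illustration, a sketch of selecting this hash at open time via the extended open call (the file name is an assumption; every opener of a shared tdb must use the same hash function, and the TDB_INCOMPATIBLE_HASH flag is the usual way to request the Jenkins hash with on-disk verification):

    struct tdb_context *tdb = tdb_open_ex(
        "jenkins.tdb",          /* assumed file name */
        0,                      /* default hash_size */
        TDB_DEFAULT,
        O_RDWR | O_CREAT, 0600,
        NULL,                   /* default logging */
        tdb_jenkins_hash);      /* the lookup3-based hash above */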
diff --git a/common/io.c b/common/io.c
new file mode 100644
index 0000000..fe47d18
--- /dev/null
+++ b/common/io.c
@@ -0,0 +1,676 @@
+ /*
+ Unix SMB/CIFS implementation.
+
+ trivial database library
+
+ Copyright (C) Andrew Tridgell 1999-2005
+ Copyright (C) Paul `Rusty' Russell 2000
+ Copyright (C) Jeremy Allison 2000-2003
+
+ ** NOTE! The following LGPL license applies to the tdb
+ ** library. This does NOT imply that all of Samba is released
+ ** under the LGPL
+
+ This library is free software; you can redistribute it and/or
+ modify it under the terms of the GNU Lesser General Public
+ License as published by the Free Software Foundation; either
+ version 3 of the License, or (at your option) any later version.
+
+ This library is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ Lesser General Public License for more details.
+
+ You should have received a copy of the GNU Lesser General Public
+ License along with this library; if not, see <http://www.gnu.org/licenses/>.
+*/
+
+
+#include "tdb_private.h"
+
+/*
+ * We prepend the mutex area, so fixup offsets. See mutex.c for details.
+ * tdb->hdr_ofs is 0 or header.mutex_size.
+ *
+ * Note that we only have the 4GB limit of tdb_off_t for
+ * tdb->map_size. The file size on disk can be 4GB + tdb->hdr_ofs!
+ */
+
+static bool tdb_adjust_offset(struct tdb_context *tdb, off_t *off)
+{
+ off_t tmp = tdb->hdr_ofs + *off;
+
+ if ((tmp < tdb->hdr_ofs) || (tmp < *off)) {
+ errno = EIO;
+ return false;
+ }
+
+ *off = tmp;
+ return true;
+}
+
+static ssize_t tdb_pwrite(struct tdb_context *tdb, const void *buf,
+ size_t count, off_t offset)
+{
+ if (!tdb_adjust_offset(tdb, &offset)) {
+ return -1;
+ }
+ return pwrite(tdb->fd, buf, count, offset);
+}
+
+static ssize_t tdb_pread(struct tdb_context *tdb, void *buf,
+ size_t count, off_t offset)
+{
+ if (!tdb_adjust_offset(tdb, &offset)) {
+ return -1;
+ }
+ return pread(tdb->fd, buf, count, offset);
+}
+
+static int tdb_ftruncate(struct tdb_context *tdb, off_t length)
+{
+ if (!tdb_adjust_offset(tdb, &length)) {
+ return -1;
+ }
+ return ftruncate(tdb->fd, length);
+}
+
+static int tdb_fstat(struct tdb_context *tdb, struct stat *buf)
+{
+ int ret;
+
+ ret = fstat(tdb->fd, buf);
+ if (ret == -1) {
+ return -1;
+ }
+
+ if (buf->st_size < tdb->hdr_ofs) {
+ errno = EIO;
+ return -1;
+ }
+ buf->st_size -= tdb->hdr_ofs;
+
+ return ret;
+}
+
+/* check for an out of bounds access - if it is out of bounds then
+ see if the database has been expanded by someone else and expand
+ if necessary
+*/
+static int tdb_oob(struct tdb_context *tdb, tdb_off_t off, tdb_len_t len,
+ int probe)
+{
+ struct stat st;
+ if (len + off < len) {
+ if (!probe) {
+ /* Ensure ecode is set for log fn. */
+ tdb->ecode = TDB_ERR_IO;
+ TDB_LOG((tdb, TDB_DEBUG_FATAL,"tdb_oob off %u len %u wrap\n",
+ off, len));
+ }
+ return -1;
+ }
+
+ if (off + len <= tdb->map_size)
+ return 0;
+ if (tdb->flags & TDB_INTERNAL) {
+ if (!probe) {
+ /* Ensure ecode is set for log fn. */
+ tdb->ecode = TDB_ERR_IO;
+ TDB_LOG((tdb, TDB_DEBUG_FATAL,"tdb_oob len %u beyond internal malloc size %u\n",
+ (int)(off + len), (int)tdb->map_size));
+ }
+ return -1;
+ }
+
+ if (tdb_fstat(tdb, &st) == -1) {
+ tdb->ecode = TDB_ERR_IO;
+ return -1;
+ }
+
+ /* Beware >4G files! */
+ if ((tdb_off_t)st.st_size != st.st_size) {
+ /* Ensure ecode is set for log fn. */
+ tdb->ecode = TDB_ERR_IO;
+ TDB_LOG((tdb, TDB_DEBUG_FATAL, "tdb_oob len %llu too large!\n",
+ (long long)st.st_size));
+ return -1;
+ }
+
+ /* Unmap, update size, remap. We do this unconditionally, to handle
+ * the unusual case where the db is truncated.
+ *
+ * This can happen to a child using tdb_reopen_all(true) on a
+ * TDB_CLEAR_IF_FIRST tdb whose parent crashes: the next
+ * opener will truncate the database. */
+ if (tdb_munmap(tdb) == -1) {
+ tdb->ecode = TDB_ERR_IO;
+ return -1;
+ }
+ tdb->map_size = st.st_size;
+ if (tdb_mmap(tdb) != 0) {
+ return -1;
+ }
+
+ if (st.st_size < (size_t)off + len) {
+ if (!probe) {
+ /* Ensure ecode is set for log fn. */
+ tdb->ecode = TDB_ERR_IO;
+ TDB_LOG((tdb, TDB_DEBUG_FATAL,"tdb_oob len %u beyond eof at %u\n",
+ (int)(off + len), (int)st.st_size));
+ }
+ return -1;
+ }
+ return 0;
+}
+
+/* write a lump of data at a specified offset */
+static int tdb_write(struct tdb_context *tdb, tdb_off_t off,
+ const void *buf, tdb_len_t len)
+{
+ if (len == 0) {
+ return 0;
+ }
+
+ if (tdb->read_only || tdb->traverse_read) {
+ tdb->ecode = TDB_ERR_RDONLY;
+ return -1;
+ }
+
+ if (tdb->methods->tdb_oob(tdb, off, len, 0) != 0)
+ return -1;
+
+ if (tdb->map_ptr) {
+ memcpy(off + (char *)tdb->map_ptr, buf, len);
+ } else {
+#ifdef HAVE_INCOHERENT_MMAP
+ tdb->ecode = TDB_ERR_IO;
+ return -1;
+#else
+ ssize_t written;
+
+ written = tdb_pwrite(tdb, buf, len, off);
+
+ if ((written != (ssize_t)len) && (written != -1)) {
+ /* try once more */
+ tdb->ecode = TDB_ERR_IO;
+ TDB_LOG((tdb, TDB_DEBUG_FATAL, "tdb_write: wrote only "
+ "%zi of %u bytes at %u, trying once more\n",
+ written, len, off));
+ written = tdb_pwrite(tdb, (const char *)buf+written,
+ len-written, off+written);
+ }
+ if (written == -1) {
+ /* Ensure ecode is set for log fn. */
+ tdb->ecode = TDB_ERR_IO;
+ TDB_LOG((tdb, TDB_DEBUG_FATAL,"tdb_write failed at %u "
+ "len=%u (%s)\n", off, len, strerror(errno)));
+ return -1;
+ } else if (written != (ssize_t)len) {
+ tdb->ecode = TDB_ERR_IO;
+ TDB_LOG((tdb, TDB_DEBUG_FATAL, "tdb_write: failed to "
+ "write %u bytes at %u in two attempts\n",
+ len, off));
+ return -1;
+ }
+#endif
+ }
+ return 0;
+}
+
+/* Endian conversion: we only ever deal with 4 byte quantities */
+void *tdb_convert(void *buf, uint32_t size)
+{
+ uint32_t i, *p = (uint32_t *)buf;
+ for (i = 0; i < size / 4; i++)
+ p[i] = TDB_BYTEREV(p[i]);
+ return buf;
+}
+
+
+/* read a lump of data at a specified offset, maybe convert */
+static int tdb_read(struct tdb_context *tdb, tdb_off_t off, void *buf,
+ tdb_len_t len, int cv)
+{
+ if (tdb->methods->tdb_oob(tdb, off, len, 0) != 0) {
+ return -1;
+ }
+
+ if (tdb->map_ptr) {
+ memcpy(buf, off + (char *)tdb->map_ptr, len);
+ } else {
+#ifdef HAVE_INCOHERENT_MMAP
+ tdb->ecode = TDB_ERR_IO;
+ return -1;
+#else
+ ssize_t ret;
+
+ ret = tdb_pread(tdb, buf, len, off);
+ if (ret != (ssize_t)len) {
+ /* Ensure ecode is set for log fn. */
+ tdb->ecode = TDB_ERR_IO;
+ TDB_LOG((tdb, TDB_DEBUG_FATAL,"tdb_read failed at %u "
+ "len=%u ret=%zi (%s) map_size=%u\n",
+ off, len, ret, strerror(errno),
+ tdb->map_size));
+ return -1;
+ }
+#endif
+ }
+ if (cv) {
+ tdb_convert(buf, len);
+ }
+ return 0;
+}
+
+
+
+/*
+ do an unlocked scan of the hash table heads to find the next non-zero head. The value
+ will then be confirmed with the lock held
+*/
+static void tdb_next_hash_chain(struct tdb_context *tdb, uint32_t *chain)
+{
+ uint32_t h = *chain;
+ if (tdb->map_ptr) {
+ for (;h < tdb->hash_size;h++) {
+ if (0 != *(uint32_t *)(TDB_HASH_TOP(h) + (unsigned char *)tdb->map_ptr)) {
+ break;
+ }
+ }
+ } else {
+ uint32_t off=0;
+ for (;h < tdb->hash_size;h++) {
+ if (tdb_ofs_read(tdb, TDB_HASH_TOP(h), &off) != 0 || off != 0) {
+ break;
+ }
+ }
+ }
+ (*chain) = h;
+}
+
+
+int tdb_munmap(struct tdb_context *tdb)
+{
+ if (tdb->flags & TDB_INTERNAL)
+ return 0;
+
+#ifdef HAVE_MMAP
+ if (tdb->map_ptr) {
+ int ret;
+
+ ret = munmap(tdb->map_ptr, tdb->map_size);
+ if (ret != 0)
+ return ret;
+ }
+#endif
+ tdb->map_ptr = NULL;
+ return 0;
+}
+
+/* If mmap isn't coherent, *everyone* must always mmap. */
+static bool should_mmap(const struct tdb_context *tdb)
+{
+#ifdef HAVE_INCOHERENT_MMAP
+ return true;
+#else
+ return !(tdb->flags & TDB_NOMMAP);
+#endif
+}
+
+int tdb_mmap(struct tdb_context *tdb)
+{
+ if (tdb->flags & TDB_INTERNAL)
+ return 0;
+
+#ifdef HAVE_MMAP
+ if (should_mmap(tdb)) {
+ tdb->map_ptr = mmap(NULL, tdb->map_size,
+ PROT_READ|(tdb->read_only? 0:PROT_WRITE),
+ MAP_SHARED|MAP_FILE, tdb->fd,
+ tdb->hdr_ofs);
+
+ /*
+ * NB. When mmap fails it returns MAP_FAILED *NOT* NULL !!!!
+ */
+
+ if (tdb->map_ptr == MAP_FAILED) {
+ tdb->map_ptr = NULL;
+ TDB_LOG((tdb, TDB_DEBUG_WARNING, "tdb_mmap failed for size %u (%s)\n",
+ tdb->map_size, strerror(errno)));
+#ifdef HAVE_INCOHERENT_MMAP
+ tdb->ecode = TDB_ERR_IO;
+ return -1;
+#endif
+ }
+ } else {
+ tdb->map_ptr = NULL;
+ }
+#else
+ tdb->map_ptr = NULL;
+#endif
+ return 0;
+}
+
+/* expand a file. we prefer to use ftruncate, as that is what posix
+ says to use for mmap expansion */
+static int tdb_expand_file(struct tdb_context *tdb, tdb_off_t size, tdb_off_t addition)
+{
+ char buf[8192];
+ tdb_off_t new_size;
+
+ if (tdb->read_only || tdb->traverse_read) {
+ tdb->ecode = TDB_ERR_RDONLY;
+ return -1;
+ }
+
+ if (!tdb_add_off_t(size, addition, &new_size)) {
+ tdb->ecode = TDB_ERR_OOM;
+ TDB_LOG((tdb, TDB_DEBUG_FATAL, "expand_file write "
+ "overflow detected current size[%u] addition[%u]!\n",
+ (unsigned)size, (unsigned)addition));
+ errno = ENOSPC;
+ return -1;
+ }
+
+ if (tdb_ftruncate(tdb, new_size) == -1) {
+ char b = 0;
+ ssize_t written = tdb_pwrite(tdb, &b, 1, new_size - 1);
+ if (written == 0) {
+ /* try once more, potentially revealing errno */
+ written = tdb_pwrite(tdb, &b, 1, new_size - 1);
+ }
+ if (written == 0) {
+ /* again - give up, guessing errno */
+ errno = ENOSPC;
+ }
+ if (written != 1) {
+ tdb->ecode = TDB_ERR_OOM;
+ TDB_LOG((tdb, TDB_DEBUG_FATAL, "expand_file to %u failed (%s)\n",
+ (unsigned)new_size, strerror(errno)));
+ return -1;
+ }
+ }
+
+ /* now fill the file with something. This ensures that the
+ file isn't sparse, which would be very bad if we ran out of
+ disk. This must be done with write, not via mmap */
+ memset(buf, TDB_PAD_BYTE, sizeof(buf));
+ while (addition) {
+ size_t n = addition>sizeof(buf)?sizeof(buf):addition;
+ ssize_t written = tdb_pwrite(tdb, buf, n, size);
+ if (written == 0) {
+ /* prevent infinite loops: try _once_ more */
+ written = tdb_pwrite(tdb, buf, n, size);
+ }
+ if (written == 0) {
+ /* give up, trying to provide a useful errno */
+ tdb->ecode = TDB_ERR_OOM;
+ TDB_LOG((tdb, TDB_DEBUG_FATAL, "expand_file write "
+ "returned 0 twice: giving up!\n"));
+ errno = ENOSPC;
+ return -1;
+ }
+ if (written == -1) {
+ tdb->ecode = TDB_ERR_OOM;
+ TDB_LOG((tdb, TDB_DEBUG_FATAL, "expand_file write of "
+ "%u bytes failed (%s)\n", (int)n,
+ strerror(errno)));
+ return -1;
+ }
+ if (written != n) {
+ TDB_LOG((tdb, TDB_DEBUG_WARNING, "expand_file: wrote "
+ "only %zu of %zi bytes - retrying\n", written,
+ n));
+ }
+ addition -= written;
+ size += written;
+ }
+ return 0;
+}
+
+
+/* You need 'size' bytes; this tells you how much you should expand by. */
+tdb_off_t tdb_expand_adjust(tdb_off_t map_size, tdb_off_t size, int page_size)
+{
+ tdb_off_t new_size, top_size, increment;
+ tdb_off_t max_size = UINT32_MAX - map_size;
+
+ if (size > max_size) {
+ /*
+ * We can't round up anymore, just give back
+ * what we're asked for.
+ *
+ * The caller has to take care of the ENOSPC handling.
+ */
+ return size;
+ }
+
+ /* limit size in order to avoid using up huge amounts of memory for
+ * in memory tdbs if an oddball huge record creeps in */
+ if (size > 100 * 1024) {
+ increment = size * 2;
+ } else {
+ increment = size * 100;
+ }
+ if (increment < size) {
+ goto overflow;
+ }
+
+ if (!tdb_add_off_t(map_size, increment, &top_size)) {
+ goto overflow;
+ }
+
+	/* always grow to at least top_size, and by at least 25% if
+	   the DB is smaller than 100MiB; otherwise grow it by 10%
+	   only. */
+ if (map_size > 100 * 1024 * 1024) {
+ new_size = map_size * 1.10;
+ } else {
+ new_size = map_size * 1.25;
+ }
+ if (new_size < map_size) {
+ goto overflow;
+ }
+
+ /* Round the database up to a multiple of the page size */
+ new_size = MAX(top_size, new_size);
+
+ if (new_size + page_size < new_size) {
+ /* There's a "+" in TDB_ALIGN that might overflow... */
+ goto overflow;
+ }
+
+ return TDB_ALIGN(new_size, page_size) - map_size;
+
+overflow:
+ /*
+ * Somewhere in between we went over 4GB. Make one big jump to
+ * exactly 4GB database size.
+ */
+ return max_size;
+}
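
Worked through once: for map_size = 10 MiB (10485760), size = 4096 and page_size = 4096, we have 4096 <= 100 * 1024, so increment = 4096 * 100 = 409600, giving top_size = 10895360. The database is under 100 MiB, so new_size = 10485760 * 1.25 = 13107200. MAX(top_size, new_size) = 13107200, which is already page-aligned, so the function returns 13107200 - 10485760 = 2621440: the 10 MiB database grows by 2.5 MiB.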
+
+/* expand the database at least size bytes by expanding the underlying
+ file and doing the mmap again if necessary */
+int tdb_expand(struct tdb_context *tdb, tdb_off_t size)
+{
+ struct tdb_record rec;
+ tdb_off_t offset;
+ tdb_off_t new_size;
+
+ if (tdb_lock(tdb, -1, F_WRLCK) == -1) {
+ TDB_LOG((tdb, TDB_DEBUG_ERROR, "lock failed in tdb_expand\n"));
+ return -1;
+ }
+
+ /* must know about any previous expansions by another process */
+ tdb->methods->tdb_oob(tdb, tdb->map_size, 1, 1);
+
+ /*
+	 * Note that we don't care about tdb->hdr_ofs != 0 here
+ *
+ * The 4GB limitation is just related to tdb->map_size
+ * and the offset calculation in the records.
+ *
+ * The file on disk can be up to 4GB + tdb->hdr_ofs
+ */
+ size = tdb_expand_adjust(tdb->map_size, size, tdb->page_size);
+
+ if (!tdb_add_off_t(tdb->map_size, size, &new_size)) {
+ tdb->ecode = TDB_ERR_OOM;
+ TDB_LOG((tdb, TDB_DEBUG_FATAL, "tdb_expand "
+ "overflow detected current map_size[%u] size[%u]!\n",
+ (unsigned)tdb->map_size, (unsigned)size));
+ goto fail;
+ }
+
+ /* form a new freelist record */
+ offset = tdb->map_size;
+ memset(&rec,'\0',sizeof(rec));
+ rec.rec_len = size - sizeof(rec);
+
+ if (tdb->flags & TDB_INTERNAL) {
+ char *new_map_ptr;
+
+ new_map_ptr = (char *)realloc(tdb->map_ptr, new_size);
+ if (!new_map_ptr) {
+ tdb->ecode = TDB_ERR_OOM;
+ goto fail;
+ }
+ tdb->map_ptr = new_map_ptr;
+ tdb->map_size = new_size;
+ } else {
+ int ret;
+
+ /*
+ * expand the file itself
+ */
+ ret = tdb->methods->tdb_expand_file(tdb, tdb->map_size, size);
+ if (ret != 0) {
+ goto fail;
+ }
+
+ /* Explicitly remap: if we're in a transaction, this won't
+ * happen automatically! */
+ tdb_munmap(tdb);
+ tdb->map_size = new_size;
+ if (tdb_mmap(tdb) != 0) {
+ goto fail;
+ }
+ }
+
+ /* link it into the free list */
+ if (tdb_free(tdb, offset, &rec) == -1)
+ goto fail;
+
+ tdb_unlock(tdb, -1, F_WRLCK);
+ return 0;
+ fail:
+ tdb_unlock(tdb, -1, F_WRLCK);
+ return -1;
+}
+
+/* read/write a tdb_off_t */
+int tdb_ofs_read(struct tdb_context *tdb, tdb_off_t offset, tdb_off_t *d)
+{
+ return tdb->methods->tdb_read(tdb, offset, (char*)d, sizeof(*d), DOCONV());
+}
+
+int tdb_ofs_write(struct tdb_context *tdb, tdb_off_t offset, tdb_off_t *d)
+{
+ tdb_off_t off = *d;
+ return tdb->methods->tdb_write(tdb, offset, CONVERT(off), sizeof(*d));
+}
+
+
+/* read a lump of data, allocating the space for it */
+unsigned char *tdb_alloc_read(struct tdb_context *tdb, tdb_off_t offset, tdb_len_t len)
+{
+ unsigned char *buf;
+
+ /* some systems don't like zero length malloc */
+
+ if (!(buf = (unsigned char *)malloc(len ? len : 1))) {
+ /* Ensure ecode is set for log fn. */
+ tdb->ecode = TDB_ERR_OOM;
+ TDB_LOG((tdb, TDB_DEBUG_ERROR,"tdb_alloc_read malloc failed len=%u (%s)\n",
+ len, strerror(errno)));
+ return NULL;
+ }
+ if (tdb->methods->tdb_read(tdb, offset, buf, len, 0) == -1) {
+ SAFE_FREE(buf);
+ return NULL;
+ }
+ return buf;
+}
+
+/* Give a piece of tdb data to a parser */
+
+int tdb_parse_data(struct tdb_context *tdb, TDB_DATA key,
+ tdb_off_t offset, tdb_len_t len,
+ int (*parser)(TDB_DATA key, TDB_DATA data,
+ void *private_data),
+ void *private_data)
+{
+ TDB_DATA data;
+ int result;
+
+ data.dsize = len;
+
+ if ((tdb->transaction == NULL) && (tdb->map_ptr != NULL)) {
+ /*
+ * Optimize by avoiding the malloc/memcpy/free, point the
+ * parser directly at the mmap area.
+ */
+ if (tdb->methods->tdb_oob(tdb, offset, len, 0) != 0) {
+ return -1;
+ }
+ data.dptr = offset + (unsigned char *)tdb->map_ptr;
+ return parser(key, data, private_data);
+ }
+
+ if (!(data.dptr = tdb_alloc_read(tdb, offset, len))) {
+ return -1;
+ }
+
+ result = parser(key, data, private_data);
+ free(data.dptr);
+ return result;
+}
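
The public wrapper around this is tdb_parse_record(), which looks up the key and computes offset/len itself. For illustration, a sketch that inspects a value's size without copying it out of the mmap (assuming an open context tdb):

    static int size_parser(TDB_DATA key, TDB_DATA data, void *private_data)
    {
        /* data.dptr may point straight into the mmap, so it is
           only valid inside the parser - don't keep it */
        *(size_t *)private_data = data.dsize;
        return 0;
    }

    /* at the call site: */
    size_t value_size = 0;
    TDB_DATA key = { .dptr = (unsigned char *)"key", .dsize = 3 };

    if (tdb_parse_record(tdb, key, size_parser, &value_size) == 0) {
        printf("value is %zu bytes\n", value_size);
    }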
+
+/* read/write a record */
+int tdb_rec_read(struct tdb_context *tdb, tdb_off_t offset, struct tdb_record *rec)
+{
+ if (tdb->methods->tdb_read(tdb, offset, rec, sizeof(*rec),DOCONV()) == -1)
+ return -1;
+ if (TDB_BAD_MAGIC(rec)) {
+ /* Ensure ecode is set for log fn. */
+ tdb->ecode = TDB_ERR_CORRUPT;
+ TDB_LOG((tdb, TDB_DEBUG_FATAL,"tdb_rec_read bad magic 0x%x at offset=%u\n", rec->magic, offset));
+ return -1;
+ }
+ return tdb->methods->tdb_oob(tdb, rec->next, sizeof(*rec), 0);
+}
+
+int tdb_rec_write(struct tdb_context *tdb, tdb_off_t offset, struct tdb_record *rec)
+{
+ struct tdb_record r = *rec;
+ return tdb->methods->tdb_write(tdb, offset, CONVERT(r), sizeof(r));
+}
+
+static const struct tdb_methods io_methods = {
+ tdb_read,
+ tdb_write,
+ tdb_next_hash_chain,
+ tdb_oob,
+ tdb_expand_file,
+};
+
+/*
+ initialise the default methods table
+*/
+void tdb_io_init(struct tdb_context *tdb)
+{
+ tdb->methods = &io_methods;
+}
diff --git a/common/lock.c b/common/lock.c
new file mode 100644
index 0000000..195dbb5
--- /dev/null
+++ b/common/lock.c
@@ -0,0 +1,982 @@
+ /*
+ Unix SMB/CIFS implementation.
+
+ trivial database library
+
+ Copyright (C) Andrew Tridgell 1999-2005
+ Copyright (C) Paul `Rusty' Russell 2000
+ Copyright (C) Jeremy Allison 2000-2003
+
+ ** NOTE! The following LGPL license applies to the tdb
+ ** library. This does NOT imply that all of Samba is released
+ ** under the LGPL
+
+ This library is free software; you can redistribute it and/or
+ modify it under the terms of the GNU Lesser General Public
+ License as published by the Free Software Foundation; either
+ version 3 of the License, or (at your option) any later version.
+
+ This library is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ Lesser General Public License for more details.
+
+ You should have received a copy of the GNU Lesser General Public
+ License along with this library; if not, see <http://www.gnu.org/licenses/>.
+*/
+
+#include "tdb_private.h"
+
+_PUBLIC_ void tdb_setalarm_sigptr(struct tdb_context *tdb, volatile sig_atomic_t *ptr)
+{
+ tdb->interrupt_sig_ptr = ptr;
+}
+
+static int fcntl_lock(struct tdb_context *tdb,
+ int rw, off_t off, off_t len, bool waitflag)
+{
+ struct flock fl;
+ int cmd;
+
+#ifdef USE_TDB_MUTEX_LOCKING
+ {
+ int ret;
+ if (tdb_mutex_lock(tdb, rw, off, len, waitflag, &ret)) {
+ return ret;
+ }
+ }
+#endif
+
+ fl.l_type = rw;
+ fl.l_whence = SEEK_SET;
+ fl.l_start = off;
+ fl.l_len = len;
+ fl.l_pid = 0;
+
+ cmd = waitflag ? F_SETLKW : F_SETLK;
+
+ return fcntl(tdb->fd, cmd, &fl);
+}
+
+static int fcntl_unlock(struct tdb_context *tdb, int rw, off_t off, off_t len)
+{
+ struct flock fl;
+#if 0 /* Check they matched up locks and unlocks correctly. */
+ char line[80];
+ FILE *locks;
+ bool found = false;
+
+ locks = fopen("/proc/locks", "r");
+
+ while (fgets(line, 80, locks)) {
+ char *p;
+ int type, start, l;
+
+ /* eg. 1: FLOCK ADVISORY WRITE 2440 08:01:2180826 0 EOF */
+ p = strchr(line, ':') + 1;
+ if (strncmp(p, " POSIX ADVISORY ", strlen(" POSIX ADVISORY ")))
+ continue;
+ p += strlen(" FLOCK ADVISORY ");
+ if (strncmp(p, "READ ", strlen("READ ")) == 0)
+ type = F_RDLCK;
+ else if (strncmp(p, "WRITE ", strlen("WRITE ")) == 0)
+ type = F_WRLCK;
+ else
+ abort();
+ p += 6;
+ if (atoi(p) != getpid())
+ continue;
+ p = strchr(strchr(p, ' ') + 1, ' ') + 1;
+ start = atoi(p);
+ p = strchr(p, ' ') + 1;
+ if (strncmp(p, "EOF", 3) == 0)
+ l = 0;
+ else
+ l = atoi(p) - start + 1;
+
+ if (off == start) {
+ if (len != l) {
+ fprintf(stderr, "Len %u should be %u: %s",
+ (int)len, l, line);
+ abort();
+ }
+ if (type != rw) {
+ fprintf(stderr, "Type %s wrong: %s",
+ rw == F_RDLCK ? "READ" : "WRITE", line);
+ abort();
+ }
+ found = true;
+ break;
+ }
+ }
+
+ if (!found) {
+ fprintf(stderr, "Unlock on %u@%u not found!\n",
+ (int)off, (int)len);
+ abort();
+ }
+
+ fclose(locks);
+#endif
+
+#ifdef USE_TDB_MUTEX_LOCKING
+ {
+ int ret;
+ if (tdb_mutex_unlock(tdb, rw, off, len, &ret)) {
+ return ret;
+ }
+ }
+#endif
+
+ fl.l_type = F_UNLCK;
+ fl.l_whence = SEEK_SET;
+ fl.l_start = off;
+ fl.l_len = len;
+ fl.l_pid = 0;
+
+ return fcntl(tdb->fd, F_SETLKW, &fl);
+}
+
+/* list -1 is the alloc list, otherwise a hash chain. */
+static tdb_off_t lock_offset(int list)
+{
+ return FREELIST_TOP + 4*list;
+}
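
Worked out with 4-byte offsets: lock_offset(-1) = FREELIST_TOP - 4 is the allocator/freelist lock, and hash chain h gets the byte at FREELIST_TOP + 4*h. Since tdb_nest_lock() below always takes a 1-byte fcntl range at that offset, every list has its own byte and no two lists ever contend on the same lock range.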
+
+/* a byte range locking function - return 0 on success
+   this function locks/unlocks "len" bytes at the specified offset.
+
+ On error, errno is also set so that errors are passed back properly
+ through tdb_open().
+
+ note that a len of zero means lock to end of file
+*/
+int tdb_brlock(struct tdb_context *tdb,
+ int rw_type, tdb_off_t offset, size_t len,
+ enum tdb_lock_flags flags)
+{
+ int ret;
+
+ if (tdb->flags & TDB_NOLOCK) {
+ return 0;
+ }
+
+ if (flags & TDB_LOCK_MARK_ONLY) {
+ return 0;
+ }
+
+ if ((rw_type == F_WRLCK) && (tdb->read_only || tdb->traverse_read)) {
+ tdb->ecode = TDB_ERR_RDONLY;
+ return -1;
+ }
+
+ do {
+ ret = fcntl_lock(tdb, rw_type, offset, len,
+ flags & TDB_LOCK_WAIT);
+ /* Check for a sigalarm break. */
+ if (ret == -1 && errno == EINTR &&
+ tdb->interrupt_sig_ptr &&
+ *tdb->interrupt_sig_ptr) {
+ break;
+ }
+ } while (ret == -1 && errno == EINTR);
+
+ if (ret == -1) {
+ tdb->ecode = TDB_ERR_LOCK;
+ /* Generic lock error. errno set by fcntl.
+ * EAGAIN is an expected return from non-blocking
+ * locks. */
+ if (!(flags & TDB_LOCK_PROBE) && errno != EAGAIN) {
+ TDB_LOG((tdb, TDB_DEBUG_TRACE,"tdb_brlock failed (fd=%d) at offset %u rw_type=%d flags=%d len=%zu\n",
+ tdb->fd, offset, rw_type, flags, len));
+ }
+ return -1;
+ }
+ return 0;
+}
+
+int tdb_brunlock(struct tdb_context *tdb,
+ int rw_type, tdb_off_t offset, size_t len)
+{
+ int ret;
+
+ if (tdb->flags & TDB_NOLOCK) {
+ return 0;
+ }
+
+ do {
+ ret = fcntl_unlock(tdb, rw_type, offset, len);
+ } while (ret == -1 && errno == EINTR);
+
+ if (ret == -1) {
+ TDB_LOG((tdb, TDB_DEBUG_TRACE,"tdb_brunlock failed (fd=%d) at offset %u rw_type=%u len=%zu\n",
+ tdb->fd, offset, rw_type, len));
+ }
+ return ret;
+}
+
+/*
+ * Do a tdb_brlock in a loop. Some OSes (such as Solaris) have overly
+ * conservative deadlock detection and claim a deadlock even when progress
+ * can be made. For those OSes we may loop for a while.
+ */
+
+static int tdb_brlock_retry(struct tdb_context *tdb,
+ int rw_type, tdb_off_t offset, size_t len,
+ enum tdb_lock_flags flags)
+{
+ int count = 1000;
+
+ while (count--) {
+ struct timeval tv;
+ int ret;
+
+ ret = tdb_brlock(tdb, rw_type, offset, len, flags);
+ if (ret == 0) {
+ return 0;
+ }
+ if (errno != EDEADLK) {
+ break;
+ }
+ /* sleep for as short a time as we can - more portable than usleep() */
+ tv.tv_sec = 0;
+ tv.tv_usec = 1;
+ select(0, NULL, NULL, NULL, &tv);
+ }
+ return -1;
+}
+
+/*
+ upgrade a read lock to a write lock.
+*/
+int tdb_allrecord_upgrade(struct tdb_context *tdb)
+{
+ int ret;
+
+ if (tdb->allrecord_lock.count != 1) {
+ TDB_LOG((tdb, TDB_DEBUG_ERROR,
+ "tdb_allrecord_upgrade failed: count %u too high\n",
+ tdb->allrecord_lock.count));
+ return -1;
+ }
+
+ if (tdb->allrecord_lock.off != 1) {
+ TDB_LOG((tdb, TDB_DEBUG_ERROR,
+ "tdb_allrecord_upgrade failed: already upgraded?\n"));
+ return -1;
+ }
+
+ if (tdb_have_mutexes(tdb)) {
+ ret = tdb_mutex_allrecord_upgrade(tdb);
+ if (ret == -1) {
+ goto fail;
+ }
+ ret = tdb_brlock_retry(tdb, F_WRLCK, lock_offset(tdb->hash_size),
+ 0, TDB_LOCK_WAIT|TDB_LOCK_PROBE);
+ if (ret == -1) {
+ tdb_mutex_allrecord_downgrade(tdb);
+ }
+ } else {
+ ret = tdb_brlock_retry(tdb, F_WRLCK, FREELIST_TOP, 0,
+ TDB_LOCK_WAIT|TDB_LOCK_PROBE);
+ }
+
+ if (ret == 0) {
+ tdb->allrecord_lock.ltype = F_WRLCK;
+ tdb->allrecord_lock.off = 0;
+ return 0;
+ }
+fail:
+ TDB_LOG((tdb, TDB_DEBUG_TRACE,"tdb_allrecord_upgrade failed\n"));
+ return -1;
+}
+
+static struct tdb_lock_type *find_nestlock(struct tdb_context *tdb,
+ tdb_off_t offset)
+{
+ unsigned int i;
+
+ for (i=0; i<tdb->num_lockrecs; i++) {
+ if (tdb->lockrecs[i].off == offset) {
+ return &tdb->lockrecs[i];
+ }
+ }
+ return NULL;
+}
+
+/* lock an offset in the database. */
+int tdb_nest_lock(struct tdb_context *tdb, uint32_t offset, int ltype,
+ enum tdb_lock_flags flags)
+{
+ struct tdb_lock_type *new_lck;
+
+ if (offset >= lock_offset(tdb->hash_size)) {
+ tdb->ecode = TDB_ERR_LOCK;
+ TDB_LOG((tdb, TDB_DEBUG_ERROR,"tdb_lock: invalid offset %u for ltype=%d\n",
+ offset, ltype));
+ return -1;
+ }
+ if (tdb->flags & TDB_NOLOCK)
+ return 0;
+
+ new_lck = find_nestlock(tdb, offset);
+ if (new_lck) {
+ /*
+ * Just increment the in-memory struct, posix locks
+ * don't stack.
+ */
+ new_lck->count++;
+ return 0;
+ }
+
+ if (tdb->num_lockrecs == tdb->lockrecs_array_length) {
+ new_lck = (struct tdb_lock_type *)realloc(
+ tdb->lockrecs,
+ sizeof(*tdb->lockrecs) * (tdb->num_lockrecs+1));
+ if (new_lck == NULL) {
+ errno = ENOMEM;
+ return -1;
+ }
+ tdb->lockrecs_array_length = tdb->num_lockrecs+1;
+ tdb->lockrecs = new_lck;
+ }
+
+ /* Since fcntl locks don't nest, we do a lock for the first one,
+ and simply bump the count for future ones */
+ if (tdb_brlock(tdb, ltype, offset, 1, flags)) {
+ return -1;
+ }
+
+ new_lck = &tdb->lockrecs[tdb->num_lockrecs];
+
+ new_lck->off = offset;
+ new_lck->count = 1;
+ new_lck->ltype = ltype;
+ tdb->num_lockrecs++;
+
+ return 0;
+}
+
+static int tdb_lock_and_recover(struct tdb_context *tdb)
+{
+ int ret;
+
+ /* We need to match locking order in transaction commit. */
+ if (tdb_brlock(tdb, F_WRLCK, FREELIST_TOP, 0, TDB_LOCK_WAIT)) {
+ return -1;
+ }
+
+ if (tdb_brlock(tdb, F_WRLCK, OPEN_LOCK, 1, TDB_LOCK_WAIT)) {
+ tdb_brunlock(tdb, F_WRLCK, FREELIST_TOP, 0);
+ return -1;
+ }
+
+ ret = tdb_transaction_recover(tdb);
+
+ tdb_brunlock(tdb, F_WRLCK, OPEN_LOCK, 1);
+ tdb_brunlock(tdb, F_WRLCK, FREELIST_TOP, 0);
+
+ return ret;
+}
+
+static bool have_data_locks(const struct tdb_context *tdb)
+{
+ unsigned int i;
+
+ for (i = 0; i < tdb->num_lockrecs; i++) {
+ if (tdb->lockrecs[i].off >= lock_offset(-1))
+ return true;
+ }
+ return false;
+}
+
+/*
+ * An allrecord lock allows us to avoid per-chain locks. Check if the allrecord
+ * lock is strong enough.
+ */
+static int tdb_lock_covered_by_allrecord_lock(struct tdb_context *tdb,
+ int ltype)
+{
+ if (ltype == F_RDLCK) {
+ /*
+ * The allrecord_lock is equal (F_RDLCK) or stronger
+ * (F_WRLCK). Pass.
+ */
+ return 0;
+ }
+
+ if (tdb->allrecord_lock.ltype == F_RDLCK) {
+ /*
+ * We ask for ltype==F_WRLCK, but the allrecord_lock
+ * is too weak. We can't upgrade here, so fail.
+ */
+ tdb->ecode = TDB_ERR_LOCK;
+ return -1;
+ }
+
+ /*
+ * Asking for F_WRLCK, allrecord is F_WRLCK as well. Pass.
+ */
+ return 0;
+}
+
+static int tdb_lock_list(struct tdb_context *tdb, int list, int ltype,
+ enum tdb_lock_flags waitflag)
+{
+ int ret;
+ bool check = false;
+
+ if (tdb->allrecord_lock.count) {
+ return tdb_lock_covered_by_allrecord_lock(tdb, ltype);
+ }
+
+ /*
+ * Check for recoveries: Someone might have kill -9'ed a process
+ * during a commit.
+ */
+ check = !have_data_locks(tdb);
+ ret = tdb_nest_lock(tdb, lock_offset(list), ltype, waitflag);
+
+ if (ret == 0 && check && tdb_needs_recovery(tdb)) {
+ tdb_nest_unlock(tdb, lock_offset(list), ltype, false);
+
+ if (tdb_lock_and_recover(tdb) == -1) {
+ return -1;
+ }
+ return tdb_lock_list(tdb, list, ltype, waitflag);
+ }
+ return ret;
+}
+
+/* lock a list in the database. list -1 is the alloc list */
+int tdb_lock(struct tdb_context *tdb, int list, int ltype)
+{
+ int ret;
+
+ ret = tdb_lock_list(tdb, list, ltype, TDB_LOCK_WAIT);
+ if (ret) {
+ TDB_LOG((tdb, TDB_DEBUG_ERROR, "tdb_lock failed on list %d "
+ "ltype=%d (%s)\n", list, ltype, strerror(errno)));
+ }
+ return ret;
+}
+
+/* lock a list in the database. list -1 is the alloc list. non-blocking lock */
+_PUBLIC_ int tdb_lock_nonblock(struct tdb_context *tdb, int list, int ltype)
+{
+ return tdb_lock_list(tdb, list, ltype, TDB_LOCK_NOWAIT);
+}
+
+
+int tdb_nest_unlock(struct tdb_context *tdb, uint32_t offset, int ltype,
+ bool mark_lock)
+{
+ int ret = -1;
+ struct tdb_lock_type *lck;
+
+ if (tdb->flags & TDB_NOLOCK)
+ return 0;
+
+ /* Sanity checks */
+ if (offset >= lock_offset(tdb->hash_size)) {
+ TDB_LOG((tdb, TDB_DEBUG_ERROR, "tdb_unlock: offset %u invalid (%d)\n", offset, tdb->hash_size));
+ return ret;
+ }
+
+ lck = find_nestlock(tdb, offset);
+ if ((lck == NULL) || (lck->count == 0)) {
+ TDB_LOG((tdb, TDB_DEBUG_ERROR, "tdb_unlock: count is 0\n"));
+ return -1;
+ }
+
+ if (lck->count > 1) {
+ lck->count--;
+ return 0;
+ }
+
+ /*
+ * This lock has count==1 left, so we need to unlock it in the
+ * kernel. We don't bother with decrementing the in-memory array
+ * element, we're about to overwrite it with the last array element
+ * anyway.
+ */
+
+ if (mark_lock) {
+ ret = 0;
+ } else {
+ ret = tdb_brunlock(tdb, ltype, offset, 1);
+ }
+
+ /*
+ * Shrink the array by overwriting the element just unlocked with the
+ * last array element.
+ */
+ *lck = tdb->lockrecs[--tdb->num_lockrecs];
+
+ /*
+ * We don't bother with realloc when the array shrinks, but if we have
+ * a completely idle tdb we should get rid of the locked array.
+ */
+
+ if (ret)
+ TDB_LOG((tdb, TDB_DEBUG_ERROR, "tdb_unlock: An error occurred unlocking!\n"));
+ return ret;
+}
+
+_PUBLIC_ int tdb_unlock(struct tdb_context *tdb, int list, int ltype)
+{
+ /* a global lock allows us to avoid per chain locks */
+ if (tdb->allrecord_lock.count) {
+ return tdb_lock_covered_by_allrecord_lock(tdb, ltype);
+ }
+
+ return tdb_nest_unlock(tdb, lock_offset(list), ltype, false);
+}
+
+/*
+ get the transaction lock
+ */
+int tdb_transaction_lock(struct tdb_context *tdb, int ltype,
+ enum tdb_lock_flags lockflags)
+{
+ return tdb_nest_lock(tdb, TRANSACTION_LOCK, ltype, lockflags);
+}
+
+/*
+ release the transaction lock
+ */
+int tdb_transaction_unlock(struct tdb_context *tdb, int ltype)
+{
+ return tdb_nest_unlock(tdb, TRANSACTION_LOCK, ltype, false);
+}
+
+/* Returns 0 if all done, -1 if error, 1 if ok. */
+static int tdb_allrecord_check(struct tdb_context *tdb, int ltype,
+ enum tdb_lock_flags flags, bool upgradable)
+{
+ /* There are no locks on read-only dbs */
+ if (tdb->read_only || tdb->traverse_read) {
+ tdb->ecode = TDB_ERR_LOCK;
+ return -1;
+ }
+
+ if (tdb->allrecord_lock.count && tdb->allrecord_lock.ltype == ltype) {
+ tdb->allrecord_lock.count++;
+ return 0;
+ }
+
+ if (tdb->allrecord_lock.count) {
+ /* a global lock of a different type exists */
+ tdb->ecode = TDB_ERR_LOCK;
+ return -1;
+ }
+
+ if (tdb_have_extra_locks(tdb)) {
+ /* can't combine global and chain locks */
+ tdb->ecode = TDB_ERR_LOCK;
+ return -1;
+ }
+
+ if (upgradable && ltype != F_RDLCK) {
+ /* tdb error: you can't upgrade a write lock! */
+ tdb->ecode = TDB_ERR_LOCK;
+ return -1;
+ }
+ return 1;
+}
+
+/* We only need to lock individual bytes, but Linux merges consecutive locks
+ * so we lock in contiguous ranges. */
+static int tdb_chainlock_gradual(struct tdb_context *tdb,
+ int ltype, enum tdb_lock_flags flags,
+ size_t off, size_t len)
+{
+ int ret;
+ enum tdb_lock_flags nb_flags = (flags & ~TDB_LOCK_WAIT);
+
+ if (len <= 4) {
+ /* Single record. Just do blocking lock. */
+ return tdb_brlock(tdb, ltype, off, len, flags);
+ }
+
+ /* First we try non-blocking. */
+ ret = tdb_brlock(tdb, ltype, off, len, nb_flags);
+ if (ret == 0) {
+ return 0;
+ }
+
+ /* Try locking first half, then second. */
+ ret = tdb_chainlock_gradual(tdb, ltype, flags, off, len / 2);
+ if (ret == -1)
+ return -1;
+
+ ret = tdb_chainlock_gradual(tdb, ltype, flags,
+ off + len / 2, len - len / 2);
+ if (ret == -1) {
+ tdb_brunlock(tdb, ltype, off, len / 2);
+ return -1;
+ }
+ return 0;
+}
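+
+/*
+ * Worked example (illustrative): a gradual lock of the 16-byte range
+ * [off, off+16) proceeds as
+ *
+ *   try a non-blocking lock of all 16 bytes; if that fails:
+ *     tdb_chainlock_gradual(tdb, ltype, flags, off, 8);
+ *     tdb_chainlock_gradual(tdb, ltype, flags, off + 8, 8);
+ *
+ * and each half recurses the same way until ranges of <= 4 bytes
+ * (a single record) are taken with a plain blocking tdb_brlock().
+ * This way we acquire ever-growing contiguous sub-ranges instead of
+ * blocking on the whole range at once.
+ */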
+
+/* lock/unlock entire database. It can only be upgradable if you have some
+ * other way of guaranteeing exclusivity (ie. transaction write lock).
+ * We do the locking gradually to avoid being starved by smaller locks. */
+int tdb_allrecord_lock(struct tdb_context *tdb, int ltype,
+ enum tdb_lock_flags flags, bool upgradable)
+{
+ int ret;
+
+ switch (tdb_allrecord_check(tdb, ltype, flags, upgradable)) {
+ case -1:
+ return -1;
+ case 0:
+ return 0;
+ }
+
+	/* We cover two kinds of locks:
+	 * 1) Normal chain locks. Taken for almost all operations.
+	 * 2) Individual record locks. Taken after normal or free
+	 *    chain locks.
+	 *
+	 * It is (1) which causes the starvation problem, so we're only
+	 * gradual for that. */
+
+ if (tdb_have_mutexes(tdb)) {
+ ret = tdb_mutex_allrecord_lock(tdb, ltype, flags);
+ } else {
+ ret = tdb_chainlock_gradual(tdb, ltype, flags, FREELIST_TOP,
+ tdb->hash_size * 4);
+ }
+
+ if (ret == -1) {
+ return -1;
+ }
+
+ /* Grab individual record locks. */
+ if (tdb_brlock(tdb, ltype, lock_offset(tdb->hash_size), 0,
+ flags) == -1) {
+ if (tdb_have_mutexes(tdb)) {
+ tdb_mutex_allrecord_unlock(tdb);
+ } else {
+ tdb_brunlock(tdb, ltype, FREELIST_TOP,
+ tdb->hash_size * 4);
+ }
+ return -1;
+ }
+
+ tdb->allrecord_lock.count = 1;
+ /* If it's upgradable, it's actually exclusive so we can treat
+ * it as a write lock. */
+ tdb->allrecord_lock.ltype = upgradable ? F_WRLCK : ltype;
+ tdb->allrecord_lock.off = upgradable;
+
+ if (tdb_needs_recovery(tdb)) {
+ bool mark = flags & TDB_LOCK_MARK_ONLY;
+ tdb_allrecord_unlock(tdb, ltype, mark);
+ if (mark) {
+ tdb->ecode = TDB_ERR_LOCK;
+ TDB_LOG((tdb, TDB_DEBUG_ERROR,
+ "tdb_lockall_mark cannot do recovery\n"));
+ return -1;
+ }
+ if (tdb_lock_and_recover(tdb) == -1) {
+ return -1;
+ }
+ return tdb_allrecord_lock(tdb, ltype, flags, upgradable);
+ }
+
+ return 0;
+}
+
+
+
+/* unlock entire db */
+int tdb_allrecord_unlock(struct tdb_context *tdb, int ltype, bool mark_lock)
+{
+ /* There are no locks on read-only dbs */
+ if (tdb->read_only || tdb->traverse_read) {
+ tdb->ecode = TDB_ERR_LOCK;
+ return -1;
+ }
+
+ if (tdb->allrecord_lock.count == 0) {
+ tdb->ecode = TDB_ERR_LOCK;
+ return -1;
+ }
+
+ /* Upgradable locks are marked as write locks. */
+ if (tdb->allrecord_lock.ltype != ltype
+ && (!tdb->allrecord_lock.off || ltype != F_RDLCK)) {
+ tdb->ecode = TDB_ERR_LOCK;
+ return -1;
+ }
+
+ if (tdb->allrecord_lock.count > 1) {
+ tdb->allrecord_lock.count--;
+ return 0;
+ }
+
+ if (!mark_lock) {
+ int ret;
+
+ if (tdb_have_mutexes(tdb)) {
+ ret = tdb_mutex_allrecord_unlock(tdb);
+ if (ret == 0) {
+ ret = tdb_brunlock(tdb, ltype,
+ lock_offset(tdb->hash_size),
+ 0);
+ }
+ } else {
+ ret = tdb_brunlock(tdb, ltype, FREELIST_TOP, 0);
+ }
+
+ if (ret != 0) {
+ TDB_LOG((tdb, TDB_DEBUG_ERROR, "tdb_unlockall failed "
+ "(%s)\n", strerror(errno)));
+ return -1;
+ }
+ }
+
+ tdb->allrecord_lock.count = 0;
+ tdb->allrecord_lock.ltype = 0;
+
+ return 0;
+}
+
+/* lock entire database with write lock */
+_PUBLIC_ int tdb_lockall(struct tdb_context *tdb)
+{
+ tdb_trace(tdb, "tdb_lockall");
+ return tdb_allrecord_lock(tdb, F_WRLCK, TDB_LOCK_WAIT, false);
+}
+
+/* lock entire database with write lock - mark only */
+_PUBLIC_ int tdb_lockall_mark(struct tdb_context *tdb)
+{
+ tdb_trace(tdb, "tdb_lockall_mark");
+ return tdb_allrecord_lock(tdb, F_WRLCK, TDB_LOCK_MARK_ONLY, false);
+}
+
+/* unlock entire database with write lock - unmark only */
+_PUBLIC_ int tdb_lockall_unmark(struct tdb_context *tdb)
+{
+ tdb_trace(tdb, "tdb_lockall_unmark");
+ return tdb_allrecord_unlock(tdb, F_WRLCK, true);
+}
+
+/* lock entire database with write lock - nonblocking variant */
+_PUBLIC_ int tdb_lockall_nonblock(struct tdb_context *tdb)
+{
+ int ret = tdb_allrecord_lock(tdb, F_WRLCK, TDB_LOCK_NOWAIT, false);
+ tdb_trace_ret(tdb, "tdb_lockall_nonblock", ret);
+ return ret;
+}
+
+/* unlock entire database with write lock */
+_PUBLIC_ int tdb_unlockall(struct tdb_context *tdb)
+{
+ tdb_trace(tdb, "tdb_unlockall");
+ return tdb_allrecord_unlock(tdb, F_WRLCK, false);
+}
+
+/* lock entire database with read lock */
+_PUBLIC_ int tdb_lockall_read(struct tdb_context *tdb)
+{
+ tdb_trace(tdb, "tdb_lockall_read");
+ return tdb_allrecord_lock(tdb, F_RDLCK, TDB_LOCK_WAIT, false);
+}
+
+/* lock entire database with read lock - non-blocking variant */
+_PUBLIC_ int tdb_lockall_read_nonblock(struct tdb_context *tdb)
+{
+ int ret = tdb_allrecord_lock(tdb, F_RDLCK, TDB_LOCK_NOWAIT, false);
+ tdb_trace_ret(tdb, "tdb_lockall_read_nonblock", ret);
+ return ret;
+}
+
+/* unlock entire database with read lock */
+_PUBLIC_ int tdb_unlockall_read(struct tdb_context *tdb)
+{
+ tdb_trace(tdb, "tdb_unlockall_read");
+ return tdb_allrecord_unlock(tdb, F_RDLCK, false);
+}
+
+/* lock/unlock one hash chain. This is meant to be used to reduce
+ contention - it cannot guarantee how many records will be locked */
+_PUBLIC_ int tdb_chainlock(struct tdb_context *tdb, TDB_DATA key)
+{
+ int ret = tdb_lock(tdb, BUCKET(tdb->hash_fn(&key)), F_WRLCK);
+ tdb_trace_1rec(tdb, "tdb_chainlock", key);
+ return ret;
+}
+
+/* lock/unlock one hash chain, non-blocking. This is meant to be used
+ to reduce contention - it cannot guarantee how many records will be
+ locked */
+_PUBLIC_ int tdb_chainlock_nonblock(struct tdb_context *tdb, TDB_DATA key)
+{
+ int ret = tdb_lock_nonblock(tdb, BUCKET(tdb->hash_fn(&key)), F_WRLCK);
+ tdb_trace_1rec_ret(tdb, "tdb_chainlock_nonblock", key, ret);
+ return ret;
+}
+
+/* mark a chain as locked without actually locking it. Warning! use with great caution! */
+_PUBLIC_ int tdb_chainlock_mark(struct tdb_context *tdb, TDB_DATA key)
+{
+ int ret = tdb_nest_lock(tdb, lock_offset(BUCKET(tdb->hash_fn(&key))),
+ F_WRLCK, TDB_LOCK_MARK_ONLY);
+ tdb_trace_1rec(tdb, "tdb_chainlock_mark", key);
+ return ret;
+}
+
+/* unmark a chain as locked without actually locking it. Warning! use with great caution! */
+_PUBLIC_ int tdb_chainlock_unmark(struct tdb_context *tdb, TDB_DATA key)
+{
+ tdb_trace_1rec(tdb, "tdb_chainlock_unmark", key);
+ return tdb_nest_unlock(tdb, lock_offset(BUCKET(tdb->hash_fn(&key))),
+ F_WRLCK, true);
+}
+
+_PUBLIC_ int tdb_chainunlock(struct tdb_context *tdb, TDB_DATA key)
+{
+ tdb_trace_1rec(tdb, "tdb_chainunlock", key);
+ return tdb_unlock(tdb, BUCKET(tdb->hash_fn(&key)), F_WRLCK);
+}
+
+_PUBLIC_ int tdb_chainlock_read(struct tdb_context *tdb, TDB_DATA key)
+{
+ int ret;
+ ret = tdb_lock(tdb, BUCKET(tdb->hash_fn(&key)), F_RDLCK);
+ tdb_trace_1rec(tdb, "tdb_chainlock_read", key);
+ return ret;
+}
+
+_PUBLIC_ int tdb_chainunlock_read(struct tdb_context *tdb, TDB_DATA key)
+{
+ tdb_trace_1rec(tdb, "tdb_chainunlock_read", key);
+ return tdb_unlock(tdb, BUCKET(tdb->hash_fn(&key)), F_RDLCK);
+}
+
+_PUBLIC_ int tdb_chainlock_read_nonblock(struct tdb_context *tdb, TDB_DATA key)
+{
+ int ret = tdb_lock_nonblock(tdb, BUCKET(tdb->hash_fn(&key)), F_RDLCK);
+ tdb_trace_1rec_ret(tdb, "tdb_chainlock_read_nonblock", key, ret);
+ return ret;
+}
+
+/* record lock stops delete underneath */
+int tdb_lock_record(struct tdb_context *tdb, tdb_off_t off)
+{
+ if (tdb->allrecord_lock.count) {
+ return 0;
+ }
+ return off ? tdb_brlock(tdb, F_RDLCK, off, 1, TDB_LOCK_WAIT) : 0;
+}
+
+/*
+ Write locks override our own fcntl readlocks, so check it here.
+ Note this is meant to be F_SETLK, *not* F_SETLKW, as it's not
+ an error to fail to get the lock here.
+*/
+int tdb_write_lock_record(struct tdb_context *tdb, tdb_off_t off)
+{
+ struct tdb_traverse_lock *i;
+ for (i = &tdb->travlocks; i; i = i->next)
+ if (i->off == off)
+ return -1;
+ if (tdb->allrecord_lock.count) {
+ if (tdb->allrecord_lock.ltype == F_WRLCK) {
+ return 0;
+ }
+ return -1;
+ }
+ return tdb_brlock(tdb, F_WRLCK, off, 1, TDB_LOCK_NOWAIT|TDB_LOCK_PROBE);
+}
+
+int tdb_write_unlock_record(struct tdb_context *tdb, tdb_off_t off)
+{
+ if (tdb->allrecord_lock.count) {
+ return 0;
+ }
+ return tdb_brunlock(tdb, F_WRLCK, off, 1);
+}
+
+/* fcntl locks don't stack: avoid unlocking someone else's */
+int tdb_unlock_record(struct tdb_context *tdb, tdb_off_t off)
+{
+ struct tdb_traverse_lock *i;
+ uint32_t count = 0;
+
+ if (tdb->allrecord_lock.count) {
+ return 0;
+ }
+
+ if (off == 0)
+ return 0;
+ for (i = &tdb->travlocks; i; i = i->next)
+ if (i->off == off)
+ count++;
+ return (count == 1 ? tdb_brunlock(tdb, F_RDLCK, off, 1) : 0);
+}
+
+bool tdb_have_extra_locks(struct tdb_context *tdb)
+{
+ unsigned int extra = tdb->num_lockrecs;
+
+ /* A transaction holds the lock for all records. */
+ if (!tdb->transaction && tdb->allrecord_lock.count) {
+ return true;
+ }
+
+ /* We always hold the active lock if CLEAR_IF_FIRST. */
+ if (find_nestlock(tdb, ACTIVE_LOCK)) {
+ extra--;
+ }
+
+ /* In a transaction, we expect to hold the transaction lock */
+ if (tdb->transaction && find_nestlock(tdb, TRANSACTION_LOCK)) {
+ extra--;
+ }
+
+ return extra;
+}
+
+/* The transaction code uses this to remove all locks. */
+void tdb_release_transaction_locks(struct tdb_context *tdb)
+{
+ unsigned int i, active = 0;
+
+ if (tdb->allrecord_lock.count != 0) {
+ tdb_allrecord_unlock(tdb, tdb->allrecord_lock.ltype, false);
+ tdb->allrecord_lock.count = 0;
+ }
+
+ for (i=0;i<tdb->num_lockrecs;i++) {
+ struct tdb_lock_type *lck = &tdb->lockrecs[i];
+
+ /* Don't release the active lock! Copy it to first entry. */
+ if (lck->off == ACTIVE_LOCK) {
+ tdb->lockrecs[active++] = *lck;
+ } else {
+ tdb_brunlock(tdb, lck->ltype, lck->off, 1);
+ }
+ }
+ tdb->num_lockrecs = active;
+}
+
+/* Following functions are added specifically to support CTDB. */
+
+/* Don't do actual fcntl locking, just mark tdb locked */
+int tdb_transaction_write_lock_mark(struct tdb_context *tdb);
+_PUBLIC_ int tdb_transaction_write_lock_mark(struct tdb_context *tdb)
+{
+ return tdb_transaction_lock(tdb, F_WRLCK, TDB_LOCK_MARK_ONLY);
+}
+
+/* Don't do actual fcntl unlocking, just mark tdb unlocked */
+int tdb_transaction_write_lock_unmark(struct tdb_context *tdb);
+_PUBLIC_ int tdb_transaction_write_lock_unmark(struct tdb_context *tdb)
+{
+ return tdb_nest_unlock(tdb, TRANSACTION_LOCK, F_WRLCK, true);
+}
diff --git a/common/mutex.c b/common/mutex.c
new file mode 100644
index 0000000..fae43d4
--- /dev/null
+++ b/common/mutex.c
@@ -0,0 +1,1027 @@
+/*
+ Unix SMB/CIFS implementation.
+
+ trivial database library
+
+ Copyright (C) Volker Lendecke 2012,2013
+ Copyright (C) Stefan Metzmacher 2013,2014
+ Copyright (C) Michael Adam 2014
+
+ ** NOTE! The following LGPL license applies to the tdb
+ ** library. This does NOT imply that all of Samba is released
+ ** under the LGPL
+
+ This library is free software; you can redistribute it and/or
+ modify it under the terms of the GNU Lesser General Public
+ License as published by the Free Software Foundation; either
+ version 3 of the License, or (at your option) any later version.
+
+ This library is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ Lesser General Public License for more details.
+
+ You should have received a copy of the GNU Lesser General Public
+ License along with this library; if not, see <http://www.gnu.org/licenses/>.
+*/
+#include "tdb_private.h"
+#include "system/threads.h"
+
+#ifdef USE_TDB_MUTEX_LOCKING
+
+/*
+ * If we run with mutexes, we store the "struct tdb_mutexes" at the
+ * beginning of the file. We store an additional tdb_header right
+ * beyond the mutex area, page aligned. All the offsets within the tdb
+ * are relative to the area behind the mutex area. tdb->map_ptr points
+ * behind the mutex area as well, so the read and write path in the
+ * mutex case can remain unchanged.
+ *
+ * Early in the mutex development the mutexes were placed between the hash
+ * chain pointers and the real tdb data. This had two drawbacks: First, it
+ * made pointer calculations more complex. Second, we had to mmap the mutex
+ * area twice. One was the normal map_ptr in the tdb. This frequently changed
+ * from within tdb_oob. At least the Linux glibc robust mutex code assumes
+ * constant pointers in memory, so a constantly changing mmap area destroys
+ * the mutex list. So we had to mmap the first bytes of the file with a second
+ * mmap call. With that scheme, very weird errors happened that could be
+ * easily fixed by doing the mutex mmap in a second file. It seemed that
+ * mapping the same memory area twice does not end up in accessing the same
+ * physical page, looking at the mutexes in gdb it seemed that old data showed
+ * up after some re-mapping. To avoid a separate mutex file, the code now puts
+ * the real content of the tdb file after the mutex area. This way we do not
+ * have overlapping mmap areas, the mutex area is mmapped once and not
+ * changed, the tdb data area's mmap is constantly changed but does not
+ * overlap.
+ */
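+
+/*
+ * Resulting file layout (illustrative sketch):
+ *
+ *   offset 0:              struct tdb_mutexes (header copy, allrecord
+ *                          mutex, one mutex per freelist/hash chain),
+ *                          padded to a page boundary
+ *   offset mutex_size:     struct tdb_header (the "real" header)
+ *   offset mutex_size+..:  freelist pointer, hash table, records
+ *
+ * All tdb_off_t offsets are relative to the second header;
+ * tdb->hdr_ofs records that base (see common/open.c).
+ */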
+
+struct tdb_mutexes {
+ struct tdb_header hdr;
+
+ /* protect allrecord_lock */
+ pthread_mutex_t allrecord_mutex;
+
+ /*
+ * F_UNLCK: free,
+ * F_RDLCK: shared,
+ * F_WRLCK: exclusive
+ */
+ short int allrecord_lock;
+
+ /*
+ * Index 0 is the freelist mutex, followed by
+ * one mutex per hashchain.
+ */
+ pthread_mutex_t hashchains[1];
+};
+
+bool tdb_have_mutexes(struct tdb_context *tdb)
+{
+ return ((tdb->feature_flags & TDB_FEATURE_FLAG_MUTEX) != 0);
+}
+
+size_t tdb_mutex_size(struct tdb_context *tdb)
+{
+ size_t mutex_size;
+
+ if (!tdb_have_mutexes(tdb)) {
+ return 0;
+ }
+
+ mutex_size = sizeof(struct tdb_mutexes);
+ mutex_size += tdb->hash_size * sizeof(pthread_mutex_t);
+
+ return TDB_ALIGN(mutex_size, tdb->page_size);
+}
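+
+/*
+ * Example calculation (illustrative; the concrete numbers depend on
+ * libc and platform): with hash_size == 131, a 40-byte
+ * pthread_mutex_t and a 4096-byte page, the mutex area occupies
+ *
+ *   sizeof(struct tdb_mutexes) + 131 * 40 bytes,
+ *
+ * rounded up by TDB_ALIGN() to the next multiple of 4096.
+ */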
+
+/*
+ * Get the index for a chain mutex
+ */
+static bool tdb_mutex_index(struct tdb_context *tdb, off_t off, off_t len,
+ unsigned *idx)
+{
+ /*
+ * Weird but true: We fcntl lock 1 byte at an offset 4 bytes before
+ * the 4 bytes of the freelist start and the hash chain that is about
+ * to be locked. See lock_offset() where the freelist is -1 vs the
+ * "+1" in TDB_HASH_TOP(). Because the mutex array is represented in
+ * the tdb file itself as data, we need to adjust the offset here.
+ */
+ const off_t freelist_lock_ofs = FREELIST_TOP - sizeof(tdb_off_t);
+
+ if (!tdb_have_mutexes(tdb)) {
+ return false;
+ }
+ if (len != 1) {
+ /* Possibly the allrecord lock */
+ return false;
+ }
+ if (off < freelist_lock_ofs) {
+ /* One of the special locks */
+ return false;
+ }
+ if (tdb->hash_size == 0) {
+ /* tdb not initialized yet, called from tdb_open_ex() */
+ return false;
+ }
+ if (off >= TDB_DATA_START(tdb->hash_size)) {
+ /* Single record lock from traverses */
+ return false;
+ }
+
+ /*
+ * Now we know it's a freelist or hash chain lock. Those are always 4
+ * byte aligned. Paranoia check.
+ */
+ if ((off % sizeof(tdb_off_t)) != 0) {
+ abort();
+ }
+
+ /*
+ * Re-index the fcntl offset into an offset into the mutex array
+ */
+ off -= freelist_lock_ofs; /* rebase to index 0 */
+ off /= sizeof(tdb_off_t); /* 0 for freelist 1-n for hashchain */
+
+ *idx = off;
+ return true;
+}
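+
+/*
+ * Example mapping (illustrative, assuming a 4-byte tdb_off_t):
+ *
+ *   off == FREELIST_TOP - 4    ->  idx == 0      (freelist mutex)
+ *   off == FREELIST_TOP        ->  idx == 1      (hash chain 0)
+ *   off == FREELIST_TOP + 4*n  ->  idx == n + 1  (hash chain n)
+ *
+ * matching the hashchains[] layout: index 0 is the freelist, followed
+ * by one mutex per hash chain.
+ */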
+
+static bool tdb_have_mutex_chainlocks(struct tdb_context *tdb)
+{
+ size_t i;
+
+ for (i=0; i < tdb->num_lockrecs; i++) {
+ bool ret;
+ unsigned idx;
+
+ ret = tdb_mutex_index(tdb,
+ tdb->lockrecs[i].off,
+ tdb->lockrecs[i].count,
+ &idx);
+ if (!ret) {
+ continue;
+ }
+
+ if (idx == 0) {
+ /* this is the freelist mutex */
+ continue;
+ }
+
+ return true;
+ }
+
+ return false;
+}
+
+static int chain_mutex_lock(pthread_mutex_t *m, bool waitflag)
+{
+ int ret;
+
+ if (waitflag) {
+ ret = pthread_mutex_lock(m);
+ } else {
+ ret = pthread_mutex_trylock(m);
+ }
+ if (ret != EOWNERDEAD) {
+ return ret;
+ }
+
+ /*
+ * For chainlocks, we don't do any cleanup (yet?)
+ */
+ return pthread_mutex_consistent(m);
+}
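+
+/*
+ * Note: for a robust mutex, pthread_mutex_lock() returns EOWNERDEAD
+ * when the previous owner died while holding the lock. We then own
+ * the mutex, but it stays marked inconsistent until
+ * pthread_mutex_consistent() is called, which is what the helper
+ * above does before returning.
+ */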
+
+static int allrecord_mutex_lock(struct tdb_mutexes *m, bool waitflag)
+{
+ int ret;
+
+ if (waitflag) {
+ ret = pthread_mutex_lock(&m->allrecord_mutex);
+ } else {
+ ret = pthread_mutex_trylock(&m->allrecord_mutex);
+ }
+ if (ret != EOWNERDEAD) {
+ return ret;
+ }
+
+ /*
+ * The allrecord lock holder died. We need to reset the allrecord_lock
+ * to F_UNLCK. This should also be the indication for
+ * tdb_needs_recovery.
+ */
+ m->allrecord_lock = F_UNLCK;
+
+ return pthread_mutex_consistent(&m->allrecord_mutex);
+}
+
+bool tdb_mutex_lock(struct tdb_context *tdb, int rw, off_t off, off_t len,
+ bool waitflag, int *pret)
+{
+ struct tdb_mutexes *m = tdb->mutexes;
+ pthread_mutex_t *chain;
+ int ret;
+ unsigned idx;
+ bool allrecord_ok;
+
+ if (!tdb_mutex_index(tdb, off, len, &idx)) {
+ return false;
+ }
+ chain = &m->hashchains[idx];
+
+again:
+ ret = chain_mutex_lock(chain, waitflag);
+ if (ret == EBUSY) {
+ ret = EAGAIN;
+ }
+ if (ret != 0) {
+ errno = ret;
+ goto fail;
+ }
+
+ if (idx == 0) {
+ /*
+		 * This is a freelist lock, which is independent of
+		 * the allrecord lock. So we're done once we have the
+		 * freelist mutex.
+ */
+ *pret = 0;
+ return true;
+ }
+
+ if (tdb_have_mutex_chainlocks(tdb)) {
+ /*
+ * We can only check the allrecord lock once. If we do it with
+ * one chain mutex locked, we will deadlock with the allrecord
+ * locker process in the following way: We lock the first hash
+ * chain, we check for the allrecord lock. We keep the hash
+ * chain locked. Then the allrecord locker locks the
+ * allrecord_mutex. It walks the list of chain mutexes,
+ * locking them all in sequence. Meanwhile, we have the chain
+ * mutex locked, so the allrecord locker blocks trying to lock
+ * our chain mutex. Then we come in and try to lock the second
+ * chain lock, which in most cases will be the freelist. We
+ * see that the allrecord lock is locked and put ourselves on
+ * the allrecord_mutex. This will never be signalled though
+ * because the allrecord locker waits for us to give up the
+ * chain lock.
+ */
+
+ *pret = 0;
+ return true;
+ }
+
+ /*
+	 * Check if someone has the allrecord lock: queue if so.
+ */
+
+ allrecord_ok = false;
+
+ if (m->allrecord_lock == F_UNLCK) {
+ /*
+ * allrecord lock not taken
+ */
+ allrecord_ok = true;
+ }
+
+ if ((m->allrecord_lock == F_RDLCK) && (rw == F_RDLCK)) {
+ /*
+ * allrecord shared lock taken, but we only want to read
+ */
+ allrecord_ok = true;
+ }
+
+ if (allrecord_ok) {
+ *pret = 0;
+ return true;
+ }
+
+ ret = pthread_mutex_unlock(chain);
+ if (ret != 0) {
+ TDB_LOG((tdb, TDB_DEBUG_FATAL, "pthread_mutex_unlock"
+ "(chain_mutex) failed: %s\n", strerror(ret)));
+ errno = ret;
+ goto fail;
+ }
+ ret = allrecord_mutex_lock(m, waitflag);
+ if (ret == EBUSY) {
+ ret = EAGAIN;
+ }
+ if (ret != 0) {
+ if (waitflag || (ret != EAGAIN)) {
+ TDB_LOG((tdb, TDB_DEBUG_FATAL, "pthread_mutex_%slock"
+ "(allrecord_mutex) failed: %s\n",
+ waitflag ? "" : "try_", strerror(ret)));
+ }
+ errno = ret;
+ goto fail;
+ }
+ ret = pthread_mutex_unlock(&m->allrecord_mutex);
+ if (ret != 0) {
+ TDB_LOG((tdb, TDB_DEBUG_FATAL, "pthread_mutex_unlock"
+ "(allrecord_mutex) failed: %s\n", strerror(ret)));
+ errno = ret;
+ goto fail;
+ }
+ goto again;
+
+fail:
+ *pret = -1;
+ return true;
+}
+
+bool tdb_mutex_unlock(struct tdb_context *tdb, int rw, off_t off, off_t len,
+ int *pret)
+{
+ struct tdb_mutexes *m = tdb->mutexes;
+ pthread_mutex_t *chain;
+ int ret;
+ unsigned idx;
+
+ if (!tdb_mutex_index(tdb, off, len, &idx)) {
+ return false;
+ }
+ chain = &m->hashchains[idx];
+
+ ret = pthread_mutex_unlock(chain);
+ if (ret == 0) {
+ *pret = 0;
+ return true;
+ }
+ errno = ret;
+ *pret = -1;
+ return true;
+}
+
+int tdb_mutex_allrecord_lock(struct tdb_context *tdb, int ltype,
+ enum tdb_lock_flags flags)
+{
+ struct tdb_mutexes *m = tdb->mutexes;
+ int ret;
+ uint32_t i;
+ bool waitflag = (flags & TDB_LOCK_WAIT);
+ int saved_errno;
+
+ if (tdb->flags & TDB_NOLOCK) {
+ return 0;
+ }
+
+ if (flags & TDB_LOCK_MARK_ONLY) {
+ return 0;
+ }
+
+ ret = allrecord_mutex_lock(m, waitflag);
+ if (!waitflag && (ret == EBUSY)) {
+ errno = EAGAIN;
+ tdb->ecode = TDB_ERR_LOCK;
+ return -1;
+ }
+ if (ret != 0) {
+ if (!(flags & TDB_LOCK_PROBE)) {
+ TDB_LOG((tdb, TDB_DEBUG_TRACE,
+ "allrecord_mutex_lock() failed: %s\n",
+ strerror(ret)));
+ }
+ tdb->ecode = TDB_ERR_LOCK;
+ return -1;
+ }
+
+ if (m->allrecord_lock != F_UNLCK) {
+ TDB_LOG((tdb, TDB_DEBUG_FATAL, "allrecord_lock == %d\n",
+ (int)m->allrecord_lock));
+ goto fail_unlock_allrecord_mutex;
+ }
+ m->allrecord_lock = (ltype == F_RDLCK) ? F_RDLCK : F_WRLCK;
+
+ for (i=0; i<tdb->hash_size; i++) {
+
+ /* ignore hashchains[0], the freelist */
+ pthread_mutex_t *chain = &m->hashchains[i+1];
+
+ ret = chain_mutex_lock(chain, waitflag);
+ if (!waitflag && (ret == EBUSY)) {
+ errno = EAGAIN;
+ goto fail_unroll_allrecord_lock;
+ }
+ if (ret != 0) {
+ if (!(flags & TDB_LOCK_PROBE)) {
+ TDB_LOG((tdb, TDB_DEBUG_TRACE,
+ "chain_mutex_lock() failed: %s\n",
+ strerror(ret)));
+ }
+ errno = ret;
+ goto fail_unroll_allrecord_lock;
+ }
+
+ ret = pthread_mutex_unlock(chain);
+ if (ret != 0) {
+ TDB_LOG((tdb, TDB_DEBUG_FATAL, "pthread_mutex_unlock"
+ "(chainlock) failed: %s\n", strerror(ret)));
+ errno = ret;
+ goto fail_unroll_allrecord_lock;
+ }
+ }
+ /*
+ * We leave this routine with m->allrecord_mutex locked
+ */
+ return 0;
+
+fail_unroll_allrecord_lock:
+ m->allrecord_lock = F_UNLCK;
+
+fail_unlock_allrecord_mutex:
+ saved_errno = errno;
+ ret = pthread_mutex_unlock(&m->allrecord_mutex);
+ if (ret != 0) {
+ TDB_LOG((tdb, TDB_DEBUG_FATAL, "pthread_mutex_unlock"
+ "(allrecord_mutex) failed: %s\n", strerror(ret)));
+ }
+ errno = saved_errno;
+ tdb->ecode = TDB_ERR_LOCK;
+ return -1;
+}
+
+int tdb_mutex_allrecord_upgrade(struct tdb_context *tdb)
+{
+ struct tdb_mutexes *m = tdb->mutexes;
+ int ret;
+ uint32_t i;
+
+ if (tdb->flags & TDB_NOLOCK) {
+ return 0;
+ }
+
+ /*
+ * Our only caller tdb_allrecord_upgrade()
+	 * guarantees that we already own the allrecord lock.
+ *
+ * Which means m->allrecord_mutex is still locked by us.
+ */
+
+ if (m->allrecord_lock != F_RDLCK) {
+ tdb->ecode = TDB_ERR_LOCK;
+ TDB_LOG((tdb, TDB_DEBUG_FATAL, "allrecord_lock == %d\n",
+ (int)m->allrecord_lock));
+ return -1;
+ }
+
+ m->allrecord_lock = F_WRLCK;
+
+ for (i=0; i<tdb->hash_size; i++) {
+
+ /* ignore hashchains[0], the freelist */
+ pthread_mutex_t *chain = &m->hashchains[i+1];
+
+ ret = chain_mutex_lock(chain, true);
+ if (ret != 0) {
+ TDB_LOG((tdb, TDB_DEBUG_FATAL, "pthread_mutex_lock"
+ "(chainlock) failed: %s\n", strerror(ret)));
+ goto fail_unroll_allrecord_lock;
+ }
+
+ ret = pthread_mutex_unlock(chain);
+ if (ret != 0) {
+ TDB_LOG((tdb, TDB_DEBUG_FATAL, "pthread_mutex_unlock"
+ "(chainlock) failed: %s\n", strerror(ret)));
+ goto fail_unroll_allrecord_lock;
+ }
+ }
+
+ return 0;
+
+fail_unroll_allrecord_lock:
+ m->allrecord_lock = F_RDLCK;
+ tdb->ecode = TDB_ERR_LOCK;
+ return -1;
+}
+
+void tdb_mutex_allrecord_downgrade(struct tdb_context *tdb)
+{
+ struct tdb_mutexes *m = tdb->mutexes;
+
+ /*
+ * Our only caller tdb_allrecord_upgrade() (in the error case)
+	 * guarantees that we already own the allrecord lock.
+ *
+ * Which means m->allrecord_mutex is still locked by us.
+ */
+
+ if (m->allrecord_lock != F_WRLCK) {
+ TDB_LOG((tdb, TDB_DEBUG_FATAL, "allrecord_lock == %d\n",
+ (int)m->allrecord_lock));
+ return;
+ }
+
+ m->allrecord_lock = F_RDLCK;
+ return;
+}
+
+
+int tdb_mutex_allrecord_unlock(struct tdb_context *tdb)
+{
+ struct tdb_mutexes *m = tdb->mutexes;
+ short old;
+ int ret;
+
+ if (tdb->flags & TDB_NOLOCK) {
+ return 0;
+ }
+
+ /*
+ * Our only callers tdb_allrecord_unlock() and
+ * tdb_allrecord_lock() (in the error path)
+	 * guarantee that we already own the allrecord lock.
+ *
+ * Which means m->allrecord_mutex is still locked by us.
+ */
+
+ if ((m->allrecord_lock != F_RDLCK) && (m->allrecord_lock != F_WRLCK)) {
+ TDB_LOG((tdb, TDB_DEBUG_FATAL, "allrecord_lock == %d\n",
+ (int)m->allrecord_lock));
+ return -1;
+ }
+
+ old = m->allrecord_lock;
+ m->allrecord_lock = F_UNLCK;
+
+ ret = pthread_mutex_unlock(&m->allrecord_mutex);
+ if (ret != 0) {
+ m->allrecord_lock = old;
+ TDB_LOG((tdb, TDB_DEBUG_FATAL, "pthread_mutex_unlock"
+ "(allrecord_mutex) failed: %s\n", strerror(ret)));
+ return -1;
+ }
+ return 0;
+}
+
+int tdb_mutex_init(struct tdb_context *tdb)
+{
+ struct tdb_mutexes *m;
+ pthread_mutexattr_t ma;
+ int i, ret;
+
+ ret = tdb_mutex_mmap(tdb);
+ if (ret == -1) {
+ return -1;
+ }
+ m = tdb->mutexes;
+
+ ret = pthread_mutexattr_init(&ma);
+ if (ret != 0) {
+ goto fail_munmap;
+ }
+ ret = pthread_mutexattr_settype(&ma, PTHREAD_MUTEX_ERRORCHECK);
+ if (ret != 0) {
+ goto fail;
+ }
+ ret = pthread_mutexattr_setpshared(&ma, PTHREAD_PROCESS_SHARED);
+ if (ret != 0) {
+ goto fail;
+ }
+ ret = pthread_mutexattr_setrobust(&ma, PTHREAD_MUTEX_ROBUST);
+ if (ret != 0) {
+ goto fail;
+ }
+
+ for (i=0; i<tdb->hash_size+1; i++) {
+ pthread_mutex_t *chain = &m->hashchains[i];
+
+ ret = pthread_mutex_init(chain, &ma);
+ if (ret != 0) {
+ goto fail;
+ }
+ }
+
+ m->allrecord_lock = F_UNLCK;
+
+ ret = pthread_mutex_init(&m->allrecord_mutex, &ma);
+ if (ret != 0) {
+ goto fail;
+ }
+ ret = 0;
+fail:
+ pthread_mutexattr_destroy(&ma);
+fail_munmap:
+ tdb_mutex_munmap(tdb);
+
+ if (ret == 0) {
+ return 0;
+ }
+
+ errno = ret;
+ return -1;
+}
+
+int tdb_mutex_mmap(struct tdb_context *tdb)
+{
+ size_t len;
+ void *ptr;
+
+ len = tdb_mutex_size(tdb);
+ if (len == 0) {
+ return 0;
+ }
+
+ ptr = mmap(NULL, len, PROT_READ|PROT_WRITE, MAP_SHARED|MAP_FILE,
+ tdb->fd, 0);
+ if (ptr == MAP_FAILED) {
+ return -1;
+ }
+ tdb->mutexes = (struct tdb_mutexes *)ptr;
+
+ return 0;
+}
+
+int tdb_mutex_munmap(struct tdb_context *tdb)
+{
+ size_t len;
+
+ len = tdb_mutex_size(tdb);
+ if (len == 0) {
+ return 0;
+ }
+
+ return munmap(tdb->mutexes, len);
+}
+
+static bool tdb_mutex_locking_cached;
+
+static bool tdb_mutex_locking_supported(void)
+{
+ pthread_mutexattr_t ma;
+ pthread_mutex_t m;
+ int ret;
+ static bool initialized;
+
+ if (initialized) {
+ return tdb_mutex_locking_cached;
+ }
+
+ initialized = true;
+
+ ret = pthread_mutexattr_init(&ma);
+ if (ret != 0) {
+ return false;
+ }
+ ret = pthread_mutexattr_settype(&ma, PTHREAD_MUTEX_ERRORCHECK);
+ if (ret != 0) {
+ goto cleanup_ma;
+ }
+ ret = pthread_mutexattr_setpshared(&ma, PTHREAD_PROCESS_SHARED);
+ if (ret != 0) {
+ goto cleanup_ma;
+ }
+ ret = pthread_mutexattr_setrobust(&ma, PTHREAD_MUTEX_ROBUST);
+ if (ret != 0) {
+ goto cleanup_ma;
+ }
+ ret = pthread_mutex_init(&m, &ma);
+ if (ret != 0) {
+ goto cleanup_ma;
+ }
+ ret = pthread_mutex_lock(&m);
+ if (ret != 0) {
+ goto cleanup_m;
+ }
+ /*
+ * This makes sure we have real mutexes
+ * from a threading library instead of just
+ * stubs from libc.
+ */
+ ret = pthread_mutex_lock(&m);
+ if (ret != EDEADLK) {
+ goto cleanup_lock;
+ }
+ ret = pthread_mutex_unlock(&m);
+ if (ret != 0) {
+ goto cleanup_m;
+ }
+
+ tdb_mutex_locking_cached = true;
+ goto cleanup_m;
+
+cleanup_lock:
+ pthread_mutex_unlock(&m);
+cleanup_m:
+ pthread_mutex_destroy(&m);
+cleanup_ma:
+ pthread_mutexattr_destroy(&ma);
+ return tdb_mutex_locking_cached;
+}
+
+static void (*tdb_robust_mutext_old_handler)(int) = SIG_ERR;
+static pid_t tdb_robust_mutex_pid = -1;
+
+static bool tdb_robust_mutex_setup_sigchild(void (*handler)(int),
+ void (**p_old_handler)(int))
+{
+#ifdef HAVE_SIGACTION
+ struct sigaction act;
+ struct sigaction oldact;
+
+ memset(&act, '\0', sizeof(act));
+
+ act.sa_handler = handler;
+#ifdef SA_RESTART
+ act.sa_flags = SA_RESTART;
+#endif
+ sigemptyset(&act.sa_mask);
+ sigaddset(&act.sa_mask, SIGCHLD);
+ sigaction(SIGCHLD, &act, &oldact);
+ if (p_old_handler) {
+ *p_old_handler = oldact.sa_handler;
+ }
+ return true;
+#else /* !HAVE_SIGACTION */
+ return false;
+#endif
+}
+
+static void tdb_robust_mutex_handler(int sig)
+{
+ if (tdb_robust_mutex_pid != -1) {
+ pid_t pid;
+ int status;
+
+ pid = waitpid(tdb_robust_mutex_pid, &status, WNOHANG);
+ if (pid == tdb_robust_mutex_pid) {
+ tdb_robust_mutex_pid = -1;
+ return;
+ }
+ }
+
+ if (tdb_robust_mutext_old_handler == SIG_DFL) {
+ return;
+ }
+ if (tdb_robust_mutext_old_handler == SIG_IGN) {
+ return;
+ }
+ if (tdb_robust_mutext_old_handler == SIG_ERR) {
+ return;
+ }
+
+ tdb_robust_mutext_old_handler(sig);
+}
+
+_PUBLIC_ bool tdb_runtime_check_for_robust_mutexes(void)
+{
+ void *ptr;
+ pthread_mutex_t *m;
+ pthread_mutexattr_t ma;
+ int ret = 1;
+ int pipe_down[2] = { -1, -1 };
+ int pipe_up[2] = { -1, -1 };
+ ssize_t nread;
+ char c = 0;
+ bool ok;
+ int status;
+ static bool initialized;
+
+ if (initialized) {
+ return tdb_mutex_locking_cached;
+ }
+
+ initialized = true;
+
+ ok = tdb_mutex_locking_supported();
+ if (!ok) {
+ return false;
+ }
+
+ tdb_mutex_locking_cached = false;
+
+ ptr = mmap(NULL, sizeof(pthread_mutex_t), PROT_READ|PROT_WRITE,
+ MAP_SHARED|MAP_ANON, -1 /* fd */, 0);
+ if (ptr == MAP_FAILED) {
+ return false;
+ }
+ m = (pthread_mutex_t *)ptr;
+
+ ret = pipe(pipe_down);
+ if (ret != 0) {
+ goto cleanup_mmap;
+ }
+ ret = pipe(pipe_up);
+ if (ret != 0) {
+ goto cleanup_pipe;
+ }
+
+ ret = pthread_mutexattr_init(&ma);
+ if (ret != 0) {
+ goto cleanup_pipe;
+ }
+ ret = pthread_mutexattr_settype(&ma, PTHREAD_MUTEX_ERRORCHECK);
+ if (ret != 0) {
+ goto cleanup_ma;
+ }
+ ret = pthread_mutexattr_setpshared(&ma, PTHREAD_PROCESS_SHARED);
+ if (ret != 0) {
+ goto cleanup_ma;
+ }
+ ret = pthread_mutexattr_setrobust(&ma, PTHREAD_MUTEX_ROBUST);
+ if (ret != 0) {
+ goto cleanup_ma;
+ }
+ ret = pthread_mutex_init(m, &ma);
+ if (ret != 0) {
+ goto cleanup_ma;
+ }
+
+ if (tdb_robust_mutex_setup_sigchild(tdb_robust_mutex_handler,
+ &tdb_robust_mutext_old_handler) == false) {
+ goto cleanup_ma;
+ }
+
+ tdb_robust_mutex_pid = fork();
+ if (tdb_robust_mutex_pid == 0) {
+ size_t nwritten;
+ close(pipe_down[1]);
+ close(pipe_up[0]);
+ ret = pthread_mutex_lock(m);
+ nwritten = write(pipe_up[1], &ret, sizeof(ret));
+ if (nwritten != sizeof(ret)) {
+ _exit(1);
+ }
+ if (ret != 0) {
+ _exit(1);
+ }
+ nread = read(pipe_down[0], &c, 1);
+ if (nread != 1) {
+ _exit(1);
+ }
+ /* leave locked */
+ _exit(0);
+ }
+ if (tdb_robust_mutex_pid == -1) {
+ goto cleanup_sig_child;
+ }
+ close(pipe_down[0]);
+ pipe_down[0] = -1;
+ close(pipe_up[1]);
+ pipe_up[1] = -1;
+
+ nread = read(pipe_up[0], &ret, sizeof(ret));
+ if (nread != sizeof(ret)) {
+ goto cleanup_child;
+ }
+
+ ret = pthread_mutex_trylock(m);
+ if (ret != EBUSY) {
+ if (ret == 0) {
+ pthread_mutex_unlock(m);
+ }
+ goto cleanup_child;
+ }
+
+ if (write(pipe_down[1], &c, 1) != 1) {
+ goto cleanup_child;
+ }
+
+ nread = read(pipe_up[0], &c, 1);
+ if (nread != 0) {
+ goto cleanup_child;
+ }
+
+ while (tdb_robust_mutex_pid > 0) {
+ pid_t pid;
+
+ errno = 0;
+ pid = waitpid(tdb_robust_mutex_pid, &status, 0);
+ if (pid == tdb_robust_mutex_pid) {
+ tdb_robust_mutex_pid = -1;
+ break;
+ }
+ if (pid == -1 && errno != EINTR) {
+ goto cleanup_child;
+ }
+ }
+ tdb_robust_mutex_setup_sigchild(tdb_robust_mutext_old_handler, NULL);
+
+ ret = pthread_mutex_trylock(m);
+ if (ret != EOWNERDEAD) {
+ if (ret == 0) {
+ pthread_mutex_unlock(m);
+ }
+ goto cleanup_m;
+ }
+
+ ret = pthread_mutex_consistent(m);
+ if (ret != 0) {
+ goto cleanup_m;
+ }
+
+ ret = pthread_mutex_trylock(m);
+ if (ret != EDEADLK) {
+ pthread_mutex_unlock(m);
+ goto cleanup_m;
+ }
+
+ ret = pthread_mutex_unlock(m);
+ if (ret != 0) {
+ goto cleanup_m;
+ }
+
+ tdb_mutex_locking_cached = true;
+ goto cleanup_m;
+
+cleanup_child:
+ while (tdb_robust_mutex_pid > 0) {
+ pid_t pid;
+
+ kill(tdb_robust_mutex_pid, SIGKILL);
+
+ errno = 0;
+ pid = waitpid(tdb_robust_mutex_pid, &status, 0);
+ if (pid == tdb_robust_mutex_pid) {
+ tdb_robust_mutex_pid = -1;
+ break;
+ }
+ if (pid == -1 && errno != EINTR) {
+ break;
+ }
+ }
+cleanup_sig_child:
+ tdb_robust_mutex_setup_sigchild(tdb_robust_mutext_old_handler, NULL);
+cleanup_m:
+ pthread_mutex_destroy(m);
+cleanup_ma:
+ pthread_mutexattr_destroy(&ma);
+cleanup_pipe:
+ if (pipe_down[0] != -1) {
+ close(pipe_down[0]);
+ }
+ if (pipe_down[1] != -1) {
+ close(pipe_down[1]);
+ }
+ if (pipe_up[0] != -1) {
+ close(pipe_up[0]);
+ }
+ if (pipe_up[1] != -1) {
+ close(pipe_up[1]);
+ }
+cleanup_mmap:
+ munmap(ptr, sizeof(pthread_mutex_t));
+
+ return tdb_mutex_locking_cached;
+}
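+
+/*
+ * Typical caller pattern (an illustrative sketch, not part of the
+ * library): probe for robust mutexes before requesting
+ * TDB_MUTEX_LOCKING, which in turn requires TDB_CLEAR_IF_FIRST:
+ *
+ *   struct tdb_context *tdb;
+ *   int tdb_flags = TDB_CLEAR_IF_FIRST;
+ *
+ *   if (tdb_runtime_check_for_robust_mutexes()) {
+ *           tdb_flags |= TDB_MUTEX_LOCKING;
+ *   }
+ *   tdb = tdb_open("locking.tdb", 0, tdb_flags, O_RDWR|O_CREAT, 0600);
+ */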
+
+#else
+
+size_t tdb_mutex_size(struct tdb_context *tdb)
+{
+ return 0;
+}
+
+bool tdb_have_mutexes(struct tdb_context *tdb)
+{
+ return false;
+}
+
+int tdb_mutex_allrecord_lock(struct tdb_context *tdb, int ltype,
+ enum tdb_lock_flags flags)
+{
+ tdb->ecode = TDB_ERR_LOCK;
+ return -1;
+}
+
+int tdb_mutex_allrecord_unlock(struct tdb_context *tdb)
+{
+ return -1;
+}
+
+int tdb_mutex_allrecord_upgrade(struct tdb_context *tdb)
+{
+ tdb->ecode = TDB_ERR_LOCK;
+ return -1;
+}
+
+void tdb_mutex_allrecord_downgrade(struct tdb_context *tdb)
+{
+ return;
+}
+
+int tdb_mutex_mmap(struct tdb_context *tdb)
+{
+ errno = ENOSYS;
+ return -1;
+}
+
+int tdb_mutex_munmap(struct tdb_context *tdb)
+{
+ errno = ENOSYS;
+ return -1;
+}
+
+int tdb_mutex_init(struct tdb_context *tdb)
+{
+ errno = ENOSYS;
+ return -1;
+}
+
+_PUBLIC_ bool tdb_runtime_check_for_robust_mutexes(void)
+{
+ return false;
+}
+
+#endif
diff --git a/common/open.c b/common/open.c
new file mode 100644
index 0000000..3b53fa7
--- /dev/null
+++ b/common/open.c
@@ -0,0 +1,941 @@
+ /*
+ Unix SMB/CIFS implementation.
+
+ trivial database library
+
+ Copyright (C) Andrew Tridgell 1999-2005
+ Copyright (C) Paul `Rusty' Russell 2000
+ Copyright (C) Jeremy Allison 2000-2003
+
+ ** NOTE! The following LGPL license applies to the tdb
+ ** library. This does NOT imply that all of Samba is released
+ ** under the LGPL
+
+ This library is free software; you can redistribute it and/or
+ modify it under the terms of the GNU Lesser General Public
+ License as published by the Free Software Foundation; either
+ version 3 of the License, or (at your option) any later version.
+
+ This library is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ Lesser General Public License for more details.
+
+ You should have received a copy of the GNU Lesser General Public
+ License along with this library; if not, see <http://www.gnu.org/licenses/>.
+*/
+
+#include "tdb_private.h"
+
+/* all contexts, to ensure no double-opens (fcntl locks don't nest!) */
+static struct tdb_context *tdbs = NULL;
+
+/* We use two hashes to double-check they're using the right hash function. */
+void tdb_header_hash(struct tdb_context *tdb,
+ uint32_t *magic1_hash, uint32_t *magic2_hash)
+{
+ TDB_DATA hash_key;
+ uint32_t tdb_magic = TDB_MAGIC;
+
+ hash_key.dptr = discard_const_p(unsigned char, TDB_MAGIC_FOOD);
+ hash_key.dsize = sizeof(TDB_MAGIC_FOOD);
+ *magic1_hash = tdb->hash_fn(&hash_key);
+
+ hash_key.dptr = (unsigned char *)CONVERT(tdb_magic);
+ hash_key.dsize = sizeof(tdb_magic);
+ *magic2_hash = tdb->hash_fn(&hash_key);
+
+ /* Make sure at least one hash is non-zero! */
+ if (*magic1_hash == 0 && *magic2_hash == 0)
+ *magic1_hash = 1;
+}
+
+/* initialise a new database with a specified hash size */
+static int tdb_new_database(struct tdb_context *tdb, struct tdb_header *header,
+ int hash_size)
+{
+ struct tdb_header *newdb;
+ size_t size;
+ int ret = -1;
+
+ /* We make it up in memory, then write it out if not internal */
+ size = sizeof(struct tdb_header) + (hash_size+1)*sizeof(tdb_off_t);
+ if (!(newdb = (struct tdb_header *)calloc(size, 1))) {
+ tdb->ecode = TDB_ERR_OOM;
+ return -1;
+ }
+
+ /* Fill in the header */
+ newdb->version = TDB_VERSION;
+ newdb->hash_size = hash_size;
+
+ tdb_header_hash(tdb, &newdb->magic1_hash, &newdb->magic2_hash);
+
+ /* Make sure older tdbs (which don't check the magic hash fields)
+ * will refuse to open this TDB. */
+ if (tdb->flags & TDB_INCOMPATIBLE_HASH)
+ newdb->rwlocks = TDB_HASH_RWLOCK_MAGIC;
+
+ /*
+	 * We create a tdb with TDB_FEATURE_FLAG_MUTEX support;
+	 * the flag combination and runtime feature checks
+	 * have already been done by the caller.
+ */
+ if (tdb->flags & TDB_MUTEX_LOCKING) {
+ newdb->feature_flags |= TDB_FEATURE_FLAG_MUTEX;
+ }
+
+ /*
+ * If we have any features we add the FEATURE_FLAG_MAGIC, overwriting the
+ * TDB_HASH_RWLOCK_MAGIC above.
+ */
+ if (newdb->feature_flags != 0) {
+ newdb->rwlocks = TDB_FEATURE_FLAG_MAGIC;
+ }
+
+ /*
+	 * It's required for some following code paths
+ * to have the fields on 'tdb' up-to-date.
+ *
+ * E.g. tdb_mutex_size() requires it
+ */
+ tdb->feature_flags = newdb->feature_flags;
+ tdb->hash_size = newdb->hash_size;
+
+ if (tdb->flags & TDB_INTERNAL) {
+ tdb->map_size = size;
+ tdb->map_ptr = (char *)newdb;
+ memcpy(header, newdb, sizeof(*header));
+ /* Convert the `ondisk' version if asked. */
+ CONVERT(*newdb);
+ return 0;
+ }
+ if (lseek(tdb->fd, 0, SEEK_SET) == -1)
+ goto fail;
+
+ if (ftruncate(tdb->fd, 0) == -1)
+ goto fail;
+
+ if (newdb->feature_flags & TDB_FEATURE_FLAG_MUTEX) {
+ newdb->mutex_size = tdb_mutex_size(tdb);
+ tdb->hdr_ofs = newdb->mutex_size;
+ }
+
+ /* This creates an endian-converted header, as if read from disk */
+ CONVERT(*newdb);
+ memcpy(header, newdb, sizeof(*header));
+ /* Don't endian-convert the magic food! */
+ memcpy(newdb->magic_food, TDB_MAGIC_FOOD, strlen(TDB_MAGIC_FOOD)+1);
+
+ if (!tdb_write_all(tdb->fd, newdb, size))
+ goto fail;
+
+ if (newdb->feature_flags & TDB_FEATURE_FLAG_MUTEX) {
+
+ /*
+ * Now we init the mutex area
+ * followed by a second header.
+ */
+
+ ret = ftruncate(
+ tdb->fd,
+ newdb->mutex_size + sizeof(struct tdb_header));
+ if (ret == -1) {
+ goto fail;
+ }
+ ret = tdb_mutex_init(tdb);
+ if (ret == -1) {
+ goto fail;
+ }
+
+ /*
+ * Write a second header behind the mutexes. That's the area
+	 * that will be mmapped.
+ */
+ ret = lseek(tdb->fd, newdb->mutex_size, SEEK_SET);
+ if (ret == -1) {
+ goto fail;
+ }
+ if (!tdb_write_all(tdb->fd, newdb, size)) {
+ goto fail;
+ }
+ }
+
+ ret = 0;
+ fail:
+ SAFE_FREE(newdb);
+ return ret;
+}
+
+
+
+static int tdb_already_open(dev_t device,
+ ino_t ino)
+{
+ struct tdb_context *i;
+
+ for (i = tdbs; i; i = i->next) {
+ if (i->device == device && i->inode == ino) {
+ return 1;
+ }
+ }
+
+ return 0;
+}
+
+/* open the database, creating it if necessary
+
+ The open_flags and mode are passed straight to the open call on the
+ database file. A flags value of O_WRONLY is invalid. The hash size
+ is advisory, use zero for a default value.
+
+ Return is NULL on error, in which case errno is also set. Don't
+ try to call tdb_error or tdb_errname, just do strerror(errno).
+
+ @param name may be NULL for internal databases. */
+_PUBLIC_ struct tdb_context *tdb_open(const char *name, int hash_size, int tdb_flags,
+ int open_flags, mode_t mode)
+{
+ return tdb_open_ex(name, hash_size, tdb_flags, open_flags, mode, NULL, NULL);
+}
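+
+/*
+ * Example (an illustrative sketch): open or create a database with
+ * the default hash size; on failure, report via errno as described
+ * above:
+ *
+ *   struct tdb_context *db;
+ *
+ *   db = tdb_open("example.tdb", 0, TDB_DEFAULT,
+ *                 O_RDWR|O_CREAT, 0600);
+ *   if (db == NULL) {
+ *           fprintf(stderr, "tdb_open failed: %s\n", strerror(errno));
+ *   }
+ */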
+
+/* a default logging function */
+static void null_log_fn(struct tdb_context *tdb, enum tdb_debug_level level, const char *fmt, ...) PRINTF_ATTRIBUTE(3, 4);
+static void null_log_fn(struct tdb_context *tdb, enum tdb_debug_level level, const char *fmt, ...)
+{
+}
+
+static bool check_header_hash(struct tdb_context *tdb,
+ struct tdb_header *header,
+ bool default_hash, uint32_t *m1, uint32_t *m2)
+{
+ tdb_header_hash(tdb, m1, m2);
+ if (header->magic1_hash == *m1 &&
+ header->magic2_hash == *m2) {
+ return true;
+ }
+
+ /* If they explicitly set a hash, always respect it. */
+ if (!default_hash)
+ return false;
+
+ /* Otherwise, try the other inbuilt hash. */
+ if (tdb->hash_fn == tdb_old_hash)
+ tdb->hash_fn = tdb_jenkins_hash;
+ else
+ tdb->hash_fn = tdb_old_hash;
+ return check_header_hash(tdb, header, false, m1, m2);
+}
+
+static bool tdb_mutex_open_ok(struct tdb_context *tdb,
+ const struct tdb_header *header)
+{
+ int locked;
+
+ if (tdb->flags & TDB_NOLOCK) {
+ /*
+ * We don't look at locks, so it does not matter to have a
+ * compatible mutex implementation. Allow the open.
+ */
+ return true;
+ }
+
+ locked = tdb_nest_lock(tdb, ACTIVE_LOCK, F_WRLCK,
+ TDB_LOCK_NOWAIT|TDB_LOCK_PROBE);
+
+ if ((locked == -1) && (tdb->ecode == TDB_ERR_LOCK)) {
+ /*
+ * CLEAR_IF_FIRST still active. The tdb was created on this
+ * host, so we can assume the mutex implementation is
+ * compatible. Important for tools like tdbdump on a still
+ * open locking.tdb.
+ */
+ goto check_local_settings;
+ }
+
+ /*
+ * We got the CLEAR_IF_FIRST lock. That means the database was
+ * potentially copied from somewhere else. The mutex implementation
+ * might be incompatible.
+ */
+
+ if (tdb_nest_unlock(tdb, ACTIVE_LOCK, F_WRLCK, false) == -1) {
+ /*
+ * Should not happen
+ */
+ TDB_LOG((tdb, TDB_DEBUG_ERROR, "tdb_mutex_open_ok: "
+ "failed to release ACTIVE_LOCK on %s: %s\n",
+ tdb->name, strerror(errno)));
+ return false;
+ }
+
+check_local_settings:
+
+ if (!(tdb->flags & TDB_MUTEX_LOCKING)) {
+ TDB_LOG((tdb, TDB_DEBUG_ERROR, "tdb_mutex_open_ok[%s]: "
+ "Can use mutexes only with "
+ "MUTEX_LOCKING or NOLOCK\n",
+ tdb->name));
+ return false;
+ }
+
+ if (tdb_mutex_size(tdb) != header->mutex_size) {
+ TDB_LOG((tdb, TDB_DEBUG_ERROR, "tdb_mutex_open_ok[%s]: "
+			 "Mutex size changed from %u to %u.\n",
+ tdb->name,
+ (unsigned int)header->mutex_size,
+ (unsigned int)tdb_mutex_size(tdb)));
+ return false;
+ }
+
+ return true;
+}
+
+_PUBLIC_ struct tdb_context *tdb_open_ex(const char *name, int hash_size, int tdb_flags,
+ int open_flags, mode_t mode,
+ const struct tdb_logging_context *log_ctx,
+ tdb_hash_func hash_fn)
+{
+ int orig_errno = errno;
+ struct tdb_header header;
+ struct tdb_context *tdb;
+ struct stat st;
+ int rev = 0, locked = 0;
+ unsigned char *vp;
+ uint32_t vertest;
+ unsigned v;
+ const char *hash_alg;
+ uint32_t magic1, magic2;
+ int ret;
+
+ ZERO_STRUCT(header);
+
+ if (!(tdb = (struct tdb_context *)calloc(1, sizeof *tdb))) {
+ /* Can't log this */
+ errno = ENOMEM;
+ goto fail;
+ }
+ tdb_io_init(tdb);
+
+ if (tdb_flags & TDB_INTERNAL) {
+ tdb_flags |= TDB_INCOMPATIBLE_HASH;
+ }
+ if (tdb_flags & TDB_MUTEX_LOCKING) {
+ tdb_flags |= TDB_INCOMPATIBLE_HASH;
+ }
+
+ tdb->fd = -1;
+#ifdef TDB_TRACE
+ tdb->tracefd = -1;
+#endif
+ tdb->name = NULL;
+ tdb->map_ptr = NULL;
+ tdb->flags = tdb_flags;
+ tdb->open_flags = open_flags;
+ if (log_ctx) {
+ tdb->log = *log_ctx;
+ } else {
+ tdb->log.log_fn = null_log_fn;
+ tdb->log.log_private = NULL;
+ }
+
+ if (name == NULL && (tdb_flags & TDB_INTERNAL)) {
+ name = "__TDB_INTERNAL__";
+ }
+
+ if (name == NULL) {
+ tdb->name = discard_const_p(char, "__NULL__");
+ TDB_LOG((tdb, TDB_DEBUG_FATAL, "tdb_open_ex: called with name == NULL\n"));
+ tdb->name = NULL;
+ errno = EINVAL;
+ goto fail;
+ }
+
+ /* now make a copy of the name, as the caller memory might go away */
+ if (!(tdb->name = (char *)strdup(name))) {
+ /*
+ * set the name as the given string, so that tdb_name() will
+ * work in case of an error.
+ */
+ tdb->name = discard_const_p(char, name);
+ TDB_LOG((tdb, TDB_DEBUG_ERROR, "tdb_open_ex: can't strdup(%s)\n",
+ name));
+ tdb->name = NULL;
+ errno = ENOMEM;
+ goto fail;
+ }
+
+ if (hash_fn) {
+ tdb->hash_fn = hash_fn;
+ hash_alg = "the user defined";
+ } else {
+ /* This controls what we use when creating a tdb. */
+ if (tdb->flags & TDB_INCOMPATIBLE_HASH) {
+ tdb->hash_fn = tdb_jenkins_hash;
+ } else {
+ tdb->hash_fn = tdb_old_hash;
+ }
+ hash_alg = "either default";
+ }
+
+ /* cache the page size */
+ tdb->page_size = getpagesize();
+ if (tdb->page_size <= 0) {
+ tdb->page_size = 0x2000;
+ }
+
+ tdb->max_dead_records = (tdb_flags & TDB_VOLATILE) ? 5 : 0;
+
+ if ((open_flags & O_ACCMODE) == O_WRONLY) {
+ TDB_LOG((tdb, TDB_DEBUG_ERROR, "tdb_open_ex: can't open tdb %s write-only\n",
+ name));
+ errno = EINVAL;
+ goto fail;
+ }
+
+ if (hash_size == 0)
+ hash_size = DEFAULT_HASH_SIZE;
+ if ((open_flags & O_ACCMODE) == O_RDONLY) {
+ tdb->read_only = 1;
+ /* read only databases don't do locking or clear if first */
+ tdb->flags |= TDB_NOLOCK;
+ tdb->flags &= ~(TDB_CLEAR_IF_FIRST|TDB_MUTEX_LOCKING);
+ }
+
+ if ((tdb->flags & TDB_ALLOW_NESTING) &&
+ (tdb->flags & TDB_DISALLOW_NESTING)) {
+ tdb->ecode = TDB_ERR_NESTING;
+ TDB_LOG((tdb, TDB_DEBUG_FATAL, "tdb_open_ex: "
+ "allow_nesting and disallow_nesting are not allowed together!"));
+ errno = EINVAL;
+ goto fail;
+ }
+
+ if (tdb->flags & TDB_MUTEX_LOCKING) {
+ /*
+		 * Here we catch bugs in the callers;
+		 * the runtime check for existing tdbs comes later.
+ */
+
+ if (!(tdb->flags & TDB_CLEAR_IF_FIRST)) {
+ TDB_LOG((tdb, TDB_DEBUG_ERROR, "tdb_open_ex: "
+ "invalid flags for %s - TDB_MUTEX_LOCKING "
+ "requires TDB_CLEAR_IF_FIRST\n", name));
+ errno = EINVAL;
+ goto fail;
+ }
+
+ if (tdb->flags & TDB_INTERNAL) {
+ TDB_LOG((tdb, TDB_DEBUG_ERROR, "tdb_open_ex: "
+ "invalid flags for %s - TDB_MUTEX_LOCKING and "
+ "TDB_INTERNAL are not allowed together\n", name));
+ errno = EINVAL;
+ goto fail;
+ }
+
+ if (tdb->flags & TDB_NOMMAP) {
+ TDB_LOG((tdb, TDB_DEBUG_ERROR, "tdb_open_ex: "
+ "invalid flags for %s - TDB_MUTEX_LOCKING and "
+ "TDB_NOMMAP are not allowed together\n", name));
+ errno = EINVAL;
+ goto fail;
+ }
+
+ if (tdb->read_only) {
+ TDB_LOG((tdb, TDB_DEBUG_ERROR, "tdb_open_ex: "
+ "invalid flags for %s - TDB_MUTEX_LOCKING "
+ "not allowed read only\n", name));
+ errno = EINVAL;
+ goto fail;
+ }
+
+ /*
+ * The callers should have called
+ * tdb_runtime_check_for_robust_mutexes()
+ * before using TDB_MUTEX_LOCKING!
+ *
+ * This makes sure the caller understands
+ * that the locking may behave a bit differently
+ * than with pure fcntl locking. E.g. multiple
+ * read locks are not supported.
+ */
+ if (!tdb_runtime_check_for_robust_mutexes()) {
+ TDB_LOG((tdb, TDB_DEBUG_ERROR, "tdb_open_ex: "
+ "invalid flags for %s - TDB_MUTEX_LOCKING "
+ "requires support for robust_mutexes\n",
+ name));
+ errno = ENOSYS;
+ goto fail;
+ }
+ }
+
+ if (getenv("TDB_NO_FSYNC")) {
+ tdb->flags |= TDB_NOSYNC;
+ }
+
+ /*
+ * TDB_ALLOW_NESTING is the default behavior.
+ * Note: this may change in future versions!
+ */
+ if (!(tdb->flags & TDB_DISALLOW_NESTING)) {
+ tdb->flags |= TDB_ALLOW_NESTING;
+ }
+
+ /* internal databases don't mmap or lock, and start off cleared */
+ if (tdb->flags & TDB_INTERNAL) {
+ tdb->flags |= (TDB_NOLOCK | TDB_NOMMAP);
+ tdb->flags &= ~TDB_CLEAR_IF_FIRST;
+ if (tdb_new_database(tdb, &header, hash_size) != 0) {
+ TDB_LOG((tdb, TDB_DEBUG_ERROR, "tdb_open_ex: tdb_new_database failed!"));
+ goto fail;
+ }
+ tdb->hash_size = hash_size;
+ goto internal;
+ }
+
+ if ((tdb->fd = open(name, open_flags, mode)) == -1) {
+ TDB_LOG((tdb, TDB_DEBUG_WARNING, "tdb_open_ex: could not open file %s: %s\n",
+ name, strerror(errno)));
+ goto fail; /* errno set by open(2) */
+ }
+
+ /* on exec, don't inherit the fd */
+ v = fcntl(tdb->fd, F_GETFD, 0);
+ fcntl(tdb->fd, F_SETFD, v | FD_CLOEXEC);
+
+ /* ensure there is only one process initialising at once */
+ if (tdb_nest_lock(tdb, OPEN_LOCK, F_WRLCK, TDB_LOCK_WAIT) == -1) {
+ TDB_LOG((tdb, TDB_DEBUG_ERROR, "tdb_open_ex: failed to get open lock on %s: %s\n",
+ name, strerror(errno)));
+ goto fail; /* errno set by tdb_brlock */
+ }
+
+	/* we need to zero the database if we are the only one with it open */
+ if ((tdb_flags & TDB_CLEAR_IF_FIRST) &&
+ (!tdb->read_only) &&
+ (locked = (tdb_nest_lock(tdb, ACTIVE_LOCK, F_WRLCK, TDB_LOCK_NOWAIT|TDB_LOCK_PROBE) == 0))) {
+ ret = tdb_brlock(tdb, F_WRLCK, FREELIST_TOP, 0,
+ TDB_LOCK_WAIT);
+ if (ret == -1) {
+ TDB_LOG((tdb, TDB_DEBUG_FATAL, "tdb_open_ex: "
+ "tdb_brlock failed for %s: %s\n",
+ name, strerror(errno)));
+ goto fail;
+ }
+ ret = tdb_new_database(tdb, &header, hash_size);
+ if (ret == -1) {
+ TDB_LOG((tdb, TDB_DEBUG_FATAL, "tdb_open_ex: "
+ "tdb_new_database failed for %s: %s\n",
+ name, strerror(errno)));
+ tdb_unlockall(tdb);
+ goto fail;
+ }
+ ret = tdb_brunlock(tdb, F_WRLCK, FREELIST_TOP, 0);
+ if (ret == -1) {
+ TDB_LOG((tdb, TDB_DEBUG_FATAL, "tdb_open_ex: "
+				 "tdb_brunlock failed for %s: %s\n",
+ name, strerror(errno)));
+ goto fail;
+ }
+ ret = lseek(tdb->fd, 0, SEEK_SET);
+ if (ret == -1) {
+ TDB_LOG((tdb, TDB_DEBUG_FATAL, "tdb_open_ex: "
+ "lseek failed for %s: %s\n",
+ name, strerror(errno)));
+ goto fail;
+ }
+ }
+
+ errno = 0;
+ if (read(tdb->fd, &header, sizeof(header)) != sizeof(header)
+ || strcmp(header.magic_food, TDB_MAGIC_FOOD) != 0) {
+ if (!(open_flags & O_CREAT) ||
+ tdb_new_database(tdb, &header, hash_size) == -1) {
+ if (errno == 0) {
+ errno = EIO; /* ie bad format or something */
+ }
+ goto fail;
+ }
+ rev = (tdb->flags & TDB_CONVERT);
+ } else if (header.version != TDB_VERSION
+ && !(rev = (header.version==TDB_BYTEREV(TDB_VERSION)))) {
+ /* wrong version */
+ errno = EIO;
+ goto fail;
+ }
+ vp = (unsigned char *)&header.version;
+ vertest = (((uint32_t)vp[0]) << 24) | (((uint32_t)vp[1]) << 16) |
+ (((uint32_t)vp[2]) << 8) | (uint32_t)vp[3];
+ tdb->flags |= (vertest==TDB_VERSION) ? TDB_BIGENDIAN : 0;
+ if (!rev)
+ tdb->flags &= ~TDB_CONVERT;
+ else {
+ tdb->flags |= TDB_CONVERT;
+ tdb_convert(&header, sizeof(header));
+ }
+
+ /*
+ * We only use st.st_dev and st.st_ino from the raw fstat()
+	 * call; everything else needs to use tdb_fstat() in order
+ * to skip tdb->hdr_ofs!
+ */
+ if (fstat(tdb->fd, &st) == -1) {
+ goto fail;
+ }
+ tdb->device = st.st_dev;
+ tdb->inode = st.st_ino;
+ ZERO_STRUCT(st);
+
+ if (header.rwlocks != 0 &&
+ header.rwlocks != TDB_FEATURE_FLAG_MAGIC &&
+ header.rwlocks != TDB_HASH_RWLOCK_MAGIC) {
+ TDB_LOG((tdb, TDB_DEBUG_ERROR, "tdb_open_ex: spinlocks no longer supported\n"));
+ errno = ENOSYS;
+ goto fail;
+ }
+ tdb->hash_size = header.hash_size;
+
+ if (header.rwlocks == TDB_FEATURE_FLAG_MAGIC) {
+ tdb->feature_flags = header.feature_flags;
+ }
+
+ if (tdb->feature_flags & ~TDB_SUPPORTED_FEATURE_FLAGS) {
+ TDB_LOG((tdb, TDB_DEBUG_ERROR, "tdb_open_ex: unsupported "
+ "features in tdb %s: 0x%08x (supported: 0x%08x)\n",
+ name, (unsigned)tdb->feature_flags,
+ (unsigned)TDB_SUPPORTED_FEATURE_FLAGS));
+ errno = ENOSYS;
+ goto fail;
+ }
+
+ if (tdb->feature_flags & TDB_FEATURE_FLAG_MUTEX) {
+ if (!tdb_mutex_open_ok(tdb, &header)) {
+ errno = EINVAL;
+ goto fail;
+ }
+
+ /*
+ * We need to remember the hdr_ofs
+ * also for the TDB_NOLOCK case
+ * if the current library doesn't support
+ * mutex locking.
+ */
+ tdb->hdr_ofs = header.mutex_size;
+ }
+
+ if ((header.magic1_hash == 0) && (header.magic2_hash == 0)) {
+ /* older TDB without magic hash references */
+ tdb->hash_fn = tdb_old_hash;
+ } else if (!check_header_hash(tdb, &header, !hash_fn,
+ &magic1, &magic2)) {
+ TDB_LOG((tdb, TDB_DEBUG_FATAL, "tdb_open_ex: "
+			 "%s was not created with the %s hash function we are using\n"
+ "magic1_hash[0x%08X %s 0x%08X] "
+ "magic2_hash[0x%08X %s 0x%08X]\n",
+ name, hash_alg,
+ header.magic1_hash,
+ (header.magic1_hash == magic1) ? "==" : "!=",
+ magic1,
+ header.magic2_hash,
+ (header.magic2_hash == magic2) ? "==" : "!=",
+ magic2));
+ errno = EINVAL;
+ goto fail;
+ }
+
+ /* Is it already in the open list? If so, fail. */
+ if (tdb_already_open(tdb->device, tdb->inode)) {
+ TDB_LOG((tdb, TDB_DEBUG_ERROR, "tdb_open_ex: "
+ "%s (%d,%d) is already open in this process\n",
+ name, (int)tdb->device, (int)tdb->inode));
+ errno = EBUSY;
+ goto fail;
+ }
+
+ /*
+ * We had tdb_mmap(tdb) here before,
+ * but we need to use tdb_fstat(),
+	 * which is triggered from tdb_oob() before calling tdb_mmap(),
+	 * as it skips tdb->hdr_ofs.
+ */
+ tdb->map_size = 0;
+ ret = tdb->methods->tdb_oob(tdb, 0, 1, 0);
+ if (ret == -1) {
+ errno = EIO;
+ goto fail;
+ }
+
+ if (tdb->feature_flags & TDB_FEATURE_FLAG_MUTEX) {
+ if (!(tdb->flags & TDB_NOLOCK)) {
+ ret = tdb_mutex_mmap(tdb);
+ if (ret != 0) {
+ goto fail;
+ }
+ }
+ }
+
+ if (locked) {
+ if (tdb_nest_unlock(tdb, ACTIVE_LOCK, F_WRLCK, false) == -1) {
+ TDB_LOG((tdb, TDB_DEBUG_ERROR, "tdb_open_ex: "
+ "failed to release ACTIVE_LOCK on %s: %s\n",
+ name, strerror(errno)));
+ goto fail;
+ }
+
+ }
+
+ /* We always need to do this if the CLEAR_IF_FIRST flag is set, even if
+	   we didn't get the initial exclusive lock, as we need to let all other
+ users know we're using it. */
+
+ if (tdb_flags & TDB_CLEAR_IF_FIRST) {
+ /* leave this lock in place to indicate it's in use */
+ if (tdb_nest_lock(tdb, ACTIVE_LOCK, F_RDLCK, TDB_LOCK_WAIT) == -1) {
+ goto fail;
+ }
+ }
+
+ /* if needed, run recovery */
+ if (tdb_transaction_recover(tdb) == -1) {
+ goto fail;
+ }
+
+#ifdef TDB_TRACE
+ {
+ char tracefile[strlen(name) + 32];
+
+ snprintf(tracefile, sizeof(tracefile),
+ "%s.trace.%li", name, (long)getpid());
+ tdb->tracefd = open(tracefile, O_WRONLY|O_CREAT|O_EXCL, 0600);
+ if (tdb->tracefd >= 0) {
+ tdb_enable_seqnum(tdb);
+ tdb_trace_open(tdb, "tdb_open", hash_size, tdb_flags,
+ open_flags);
+ } else
+ TDB_LOG((tdb, TDB_DEBUG_ERROR, "tdb_open_ex: failed to open trace file %s!\n", tracefile));
+ }
+#endif
+
+ internal:
+ /* Internal (memory-only) databases skip all the code above to
+ * do with disk files, and resume here by releasing their
+ * open lock and hooking into the active list. */
+ if (tdb_nest_unlock(tdb, OPEN_LOCK, F_WRLCK, false) == -1) {
+ goto fail;
+ }
+ tdb->next = tdbs;
+ tdbs = tdb;
+ errno = orig_errno;
+ return tdb;
+
+ fail:
+ { int save_errno = errno;
+
+ if (!tdb)
+ return NULL;
+
+#ifdef TDB_TRACE
+ close(tdb->tracefd);
+#endif
+ if (tdb->map_ptr) {
+ if (tdb->flags & TDB_INTERNAL)
+ SAFE_FREE(tdb->map_ptr);
+ else
+ tdb_munmap(tdb);
+ }
+ if (tdb->fd != -1)
+ if (close(tdb->fd) != 0)
+ TDB_LOG((tdb, TDB_DEBUG_ERROR, "tdb_open_ex: failed to close tdb->fd on error!\n"));
+ SAFE_FREE(tdb->lockrecs);
+ SAFE_FREE(tdb->name);
+ SAFE_FREE(tdb);
+ errno = save_errno;
+ return NULL;
+ }
+}
+
+/*
+ * Set the maximum number of dead records per hash chain
+ */
+
+_PUBLIC_ void tdb_set_max_dead(struct tdb_context *tdb, int max_dead)
+{
+ tdb->max_dead_records = max_dead;
+}
+
+/**
+ * Close a database.
+ *
+ * @returns -1 for error; 0 for success.
+ **/
+_PUBLIC_ int tdb_close(struct tdb_context *tdb)
+{
+ struct tdb_context **i;
+ int ret = 0;
+
+ if (tdb->transaction) {
+ tdb_transaction_cancel(tdb);
+ }
+ tdb_trace(tdb, "tdb_close");
+
+ if (tdb->map_ptr) {
+ if (tdb->flags & TDB_INTERNAL)
+ SAFE_FREE(tdb->map_ptr);
+ else
+ tdb_munmap(tdb);
+ }
+
+ tdb_mutex_munmap(tdb);
+
+ SAFE_FREE(tdb->name);
+ if (tdb->fd != -1) {
+ ret = close(tdb->fd);
+ tdb->fd = -1;
+ }
+ SAFE_FREE(tdb->lockrecs);
+
+ /* Remove from contexts list */
+ for (i = &tdbs; *i; i = &(*i)->next) {
+ if (*i == tdb) {
+ *i = tdb->next;
+ break;
+ }
+ }
+
+#ifdef TDB_TRACE
+ close(tdb->tracefd);
+#endif
+ memset(tdb, 0, sizeof(*tdb));
+ SAFE_FREE(tdb);
+
+ return ret;
+}
+
+/* register a logging function */
+_PUBLIC_ void tdb_set_logging_function(struct tdb_context *tdb,
+ const struct tdb_logging_context *log_ctx)
+{
+ tdb->log = *log_ctx;
+}
+
+_PUBLIC_ void *tdb_get_logging_private(struct tdb_context *tdb)
+{
+ return tdb->log.log_private;
+}
+
+static int tdb_reopen_internal(struct tdb_context *tdb, bool active_lock)
+{
+#if !defined(LIBREPLACE_PREAD_NOT_REPLACED) || \
+ !defined(LIBREPLACE_PWRITE_NOT_REPLACED)
+ struct stat st;
+#endif
+
+ if (tdb->flags & TDB_INTERNAL) {
+ return 0; /* Nothing to do. */
+ }
+
+ if (tdb_have_extra_locks(tdb)) {
+ TDB_LOG((tdb, TDB_DEBUG_ERROR, "tdb_reopen: reopen not allowed with locks held\n"));
+ goto fail;
+ }
+
+ if (tdb->transaction != 0) {
+ TDB_LOG((tdb, TDB_DEBUG_ERROR, "tdb_reopen: reopen not allowed inside a transaction\n"));
+ goto fail;
+ }
+
+/* If we have real pread & pwrite, we can skip reopen. */
+#if !defined(LIBREPLACE_PREAD_NOT_REPLACED) || \
+ !defined(LIBREPLACE_PWRITE_NOT_REPLACED)
+ if (tdb_munmap(tdb) != 0) {
+ TDB_LOG((tdb, TDB_DEBUG_FATAL, "tdb_reopen: munmap failed (%s)\n", strerror(errno)));
+ goto fail;
+ }
+ if (close(tdb->fd) != 0)
+ TDB_LOG((tdb, TDB_DEBUG_FATAL, "tdb_reopen: WARNING closing tdb->fd failed!\n"));
+ tdb->fd = open(tdb->name, tdb->open_flags & ~(O_CREAT|O_TRUNC), 0);
+ if (tdb->fd == -1) {
+ TDB_LOG((tdb, TDB_DEBUG_FATAL, "tdb_reopen: open failed (%s)\n", strerror(errno)));
+ goto fail;
+ }
+ /*
+ * We only use st.st_dev and st.st_ino from the raw fstat()
+	 * call; everything else needs to use tdb_fstat() in order
+ * to skip tdb->hdr_ofs!
+ */
+ if (fstat(tdb->fd, &st) != 0) {
+ TDB_LOG((tdb, TDB_DEBUG_FATAL, "tdb_reopen: fstat failed (%s)\n", strerror(errno)));
+ goto fail;
+ }
+ if (st.st_ino != tdb->inode || st.st_dev != tdb->device) {
+ TDB_LOG((tdb, TDB_DEBUG_FATAL, "tdb_reopen: file dev/inode has changed!\n"));
+ goto fail;
+ }
+ ZERO_STRUCT(st);
+
+ /*
+ * We had tdb_mmap(tdb) here before,
+ * but we need to use tdb_fstat(),
+	 * which is triggered from tdb_oob() before calling tdb_mmap(),
+	 * as it skips tdb->hdr_ofs.
+ */
+ tdb->map_size = 0;
+ if (tdb->methods->tdb_oob(tdb, 0, 1, 0) != 0) {
+ goto fail;
+ }
+#endif /* fake pread or pwrite */
+
+ /* We may still think we hold the active lock. */
+ tdb->num_lockrecs = 0;
+ SAFE_FREE(tdb->lockrecs);
+ tdb->lockrecs_array_length = 0;
+
+ if (active_lock && tdb_nest_lock(tdb, ACTIVE_LOCK, F_RDLCK, TDB_LOCK_WAIT) == -1) {
+ TDB_LOG((tdb, TDB_DEBUG_FATAL, "tdb_reopen: failed to obtain active lock\n"));
+ goto fail;
+ }
+
+ return 0;
+
+fail:
+ tdb_close(tdb);
+ return -1;
+}
+
+/* reopen a tdb - this can be used after a fork to ensure that we have an independent
+ seek pointer from our parent and to re-establish locks */
+_PUBLIC_ int tdb_reopen(struct tdb_context *tdb)
+{
+ return tdb_reopen_internal(tdb, tdb->flags & TDB_CLEAR_IF_FIRST);
+}
+
+/* reopen all tdb's */
+_PUBLIC_ int tdb_reopen_all(int parent_longlived)
+{
+ struct tdb_context *tdb;
+
+ for (tdb=tdbs; tdb; tdb = tdb->next) {
+ bool active_lock = (tdb->flags & TDB_CLEAR_IF_FIRST);
+
+ /*
+		 * If the parent is long-lived (i.e. a
+		 * parent daemon architecture), we know
+		 * it will keep its active lock on a
+ * tdb opened with CLEAR_IF_FIRST. Thus
+ * for child processes we don't have to
+ * add an active lock. This is essential
+ * to improve performance on systems that
+ * keep POSIX locks as a non-scalable data
+ * structure in the kernel.
+ */
+ if (parent_longlived) {
+ /* Ensure no clear-if-first. */
+ active_lock = false;
+ }
+
+ if (tdb_reopen_internal(tdb, active_lock) != 0)
+ return -1;
+ }
+
+ return 0;
+}
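+
+/*
+ * A minimal sketch of the fork pattern described above; illustrative
+ * only, not part of the library. example_fork_worker() and
+ * worker_main() are hypothetical names.
+ */
+#if 0	/* illustrative only, never compiled */
+static void example_fork_worker(void)
+{
+	pid_t pid = fork();
+
+	if (pid == 0) {
+		/*
+		 * Child of a long-lived parent daemon: pass 1 so the
+		 * children skip re-taking the CLEAR_IF_FIRST active locks.
+		 */
+		if (tdb_reopen_all(1) != 0) {
+			_exit(1);
+		}
+		worker_main();
+		_exit(0);
+	}
+}
+#endif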
diff --git a/common/rescue.c b/common/rescue.c
new file mode 100644
index 0000000..17e7ed8
--- /dev/null
+++ b/common/rescue.c
@@ -0,0 +1,349 @@
+ /*
+ Unix SMB/CIFS implementation.
+
+ trivial database library, rescue attempt code.
+
+ Copyright (C) Rusty Russell 2012
+
+ ** NOTE! The following LGPL license applies to the tdb
+ ** library. This does NOT imply that all of Samba is released
+ ** under the LGPL
+
+ This library is free software; you can redistribute it and/or
+ modify it under the terms of the GNU Lesser General Public
+ License as published by the Free Software Foundation; either
+ version 3 of the License, or (at your option) any later version.
+
+ This library is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ Lesser General Public License for more details.
+
+ You should have received a copy of the GNU Lesser General Public
+ License along with this library; if not, see <http://www.gnu.org/licenses/>.
+*/
+#include "tdb_private.h"
+#include <assert.h>
+
+
+struct found {
+ tdb_off_t head; /* 0 -> invalid. */
+ struct tdb_record rec;
+ TDB_DATA key;
+ bool in_hash;
+ bool in_free;
+};
+
+struct found_table {
+ /* As an ordered array (by head offset). */
+ struct found *arr;
+ unsigned int num, max;
+};
+
+static bool looks_like_valid_record(struct tdb_context *tdb,
+ tdb_off_t off,
+ const struct tdb_record *rec,
+ TDB_DATA *key)
+{
+ unsigned int hval;
+
+ if (rec->magic != TDB_MAGIC)
+ return false;
+
+ if (rec->key_len + rec->data_len > rec->rec_len)
+ return false;
+
+ if (rec->rec_len % TDB_ALIGNMENT)
+ return false;
+
+ /* Next pointer must make some sense. */
+ if (rec->next > 0 && rec->next < TDB_DATA_START(tdb->hash_size))
+ return false;
+
+ if (tdb->methods->tdb_oob(tdb, rec->next, sizeof(*rec), 1))
+ return false;
+
+ key->dsize = rec->key_len;
+ key->dptr = tdb_alloc_read(tdb, off + sizeof(*rec), key->dsize);
+ if (!key->dptr)
+ return false;
+
+ hval = tdb->hash_fn(key);
+ if (hval != rec->full_hash) {
+ free(key->dptr);
+ return false;
+ }
+
+ /* Caller frees up key->dptr */
+ return true;
+}
+
+static bool add_to_table(struct found_table *found,
+ tdb_off_t off,
+ struct tdb_record *rec,
+ TDB_DATA key)
+{
+ if (found->num + 1 > found->max) {
+ struct found *new;
+ found->max = (found->max ? found->max * 2 : 128);
+ new = realloc(found->arr, found->max * sizeof(found->arr[0]));
+ if (!new)
+ return false;
+ found->arr = new;
+ }
+
+ found->arr[found->num].head = off;
+ found->arr[found->num].rec = *rec;
+ found->arr[found->num].key = key;
+ found->arr[found->num].in_hash = false;
+ found->arr[found->num].in_free = false;
+
+ found->num++;
+ return true;
+}
+
+static bool walk_record(struct tdb_context *tdb,
+ const struct found *f,
+ void (*walk)(TDB_DATA, TDB_DATA, void *private_data),
+ void *private_data)
+{
+ TDB_DATA data;
+
+ data.dsize = f->rec.data_len;
+ data.dptr = tdb_alloc_read(tdb,
+ f->head + sizeof(f->rec) + f->rec.key_len,
+ data.dsize);
+ if (!data.dptr) {
+ if (tdb->ecode == TDB_ERR_OOM)
+ return false;
+ /* I/O errors are expected. */
+ return true;
+ }
+
+ walk(f->key, data, private_data);
+ free(data.dptr);
+ return true;
+}
+
+/* First entry which has offset >= this one. */
+static unsigned int find_entry(struct found_table *found, tdb_off_t off)
+{
+ unsigned int start = 0, end = found->num;
+
+ while (start < end) {
+ /* We can't overflow here. */
+ unsigned int mid = (start + end) / 2;
+
+ if (off < found->arr[mid].head) {
+ end = mid;
+ } else if (off > found->arr[mid].head) {
+ start = mid + 1;
+ } else {
+ return mid;
+ }
+ }
+
+ assert(start == end);
+ return end;
+}
+
+static void found_in_hashchain(struct found_table *found, tdb_off_t head)
+{
+ unsigned int match;
+
+ match = find_entry(found, head);
+ if (match < found->num && found->arr[match].head == head) {
+ found->arr[match].in_hash = true;
+ }
+}
+
+static void mark_free_area(struct found_table *found, tdb_off_t head,
+ tdb_len_t len)
+{
+ unsigned int match;
+
+ match = find_entry(found, head);
+ /* Mark everything within this free entry. */
+ while (match < found->num) {
+ if (found->arr[match].head >= head + len) {
+ break;
+ }
+ found->arr[match].in_free = true;
+ match++;
+ }
+}
+
+static int cmp_key(const void *a, const void *b)
+{
+ const struct found *fa = a, *fb = b;
+
+ if (fa->key.dsize < fb->key.dsize) {
+ return -1;
+ } else if (fa->key.dsize > fb->key.dsize) {
+ return 1;
+ }
+ return memcmp(fa->key.dptr, fb->key.dptr, fa->key.dsize);
+}
+
+static bool key_eq(TDB_DATA a, TDB_DATA b)
+{
+ return a.dsize == b.dsize
+ && memcmp(a.dptr, b.dptr, a.dsize) == 0;
+}
+
+static void free_table(struct found_table *found)
+{
+ unsigned int i;
+
+ for (i = 0; i < found->num; i++) {
+ free(found->arr[i].key.dptr);
+ }
+ free(found->arr);
+}
+
+static void logging_suppressed(struct tdb_context *tdb,
+ enum tdb_debug_level level, const char *fmt, ...)
+{
+}
+
+_PUBLIC_ int tdb_rescue(struct tdb_context *tdb,
+ void (*walk)(TDB_DATA, TDB_DATA, void *private_data),
+ void *private_data)
+{
+ struct found_table found = { NULL, 0, 0 };
+ tdb_off_t h, off, i;
+ tdb_log_func oldlog = tdb->log.log_fn;
+ struct tdb_record rec;
+ TDB_DATA key;
+ bool locked;
+
+ /* Read-only databases use no locking at all: it's best-effort.
+ * We may have a write lock already, so skip that case too. */
+ if (tdb->read_only || tdb->allrecord_lock.count != 0) {
+ locked = false;
+ } else {
+ if (tdb_lockall_read(tdb) == -1)
+ return -1;
+ locked = true;
+ }
+
+	/* Make sure we know the true size of the underlying file. */
+ tdb->methods->tdb_oob(tdb, tdb->map_size, 1, 1);
+
+ /* Suppress logging, since we anticipate errors. */
+ tdb->log.log_fn = logging_suppressed;
+
+ /* Now walk entire db looking for records. */
+ for (off = TDB_DATA_START(tdb->hash_size);
+ off < tdb->map_size;
+ off += TDB_ALIGNMENT) {
+ if (tdb->methods->tdb_read(tdb, off, &rec, sizeof(rec),
+ DOCONV()) == -1)
+ continue;
+
+ if (looks_like_valid_record(tdb, off, &rec, &key)) {
+ if (!add_to_table(&found, off, &rec, key)) {
+ goto oom;
+ }
+ }
+ }
+
+	/* Walk the hash chains to positively vet the records we found. */
+ for (h = 0; h < 1+tdb->hash_size; h++) {
+ bool slow_chase = false;
+ tdb_off_t slow_off = FREELIST_TOP + h*sizeof(tdb_off_t);
+
+ if (tdb_ofs_read(tdb, FREELIST_TOP + h*sizeof(tdb_off_t),
+ &off) == -1)
+ continue;
+
+ while (off && off != slow_off) {
+ if (tdb->methods->tdb_read(tdb, off, &rec, sizeof(rec),
+ DOCONV()) != 0) {
+ break;
+ }
+
+ /* 0 is the free list, rest are hash chains. */
+ if (h == 0) {
+ /* Don't mark garbage as free. */
+ if (rec.magic != TDB_FREE_MAGIC) {
+ break;
+ }
+ mark_free_area(&found, off,
+ sizeof(rec) + rec.rec_len);
+ } else {
+ found_in_hashchain(&found, off);
+ }
+
+ off = rec.next;
+
+ /* Loop detection using second pointer at half-speed */
+ if (slow_chase) {
+ /* First entry happens to be next ptr */
+ tdb_ofs_read(tdb, slow_off, &slow_off);
+ }
+ slow_chase = !slow_chase;
+ }
+ }
+
+ /* Recovery area: must be marked as free, since it often has old
+ * records in there! */
+ if (tdb_ofs_read(tdb, TDB_RECOVERY_HEAD, &off) == 0 && off != 0) {
+ if (tdb->methods->tdb_read(tdb, off, &rec, sizeof(rec),
+ DOCONV()) == 0) {
+ mark_free_area(&found, off, sizeof(rec) + rec.rec_len);
+ }
+ }
+
+ /* Now sort by key! */
+ qsort(found.arr, found.num, sizeof(found.arr[0]), cmp_key);
+
+ for (i = 0; i < found.num; ) {
+ unsigned int num, num_in_hash = 0;
+
+ /* How many are identical? */
+ for (num = 0; num < found.num - i; num++) {
+ if (!key_eq(found.arr[i].key, found.arr[i+num].key)) {
+ break;
+ }
+ if (found.arr[i+num].in_hash) {
+ if (!walk_record(tdb, &found.arr[i+num],
+ walk, private_data))
+ goto oom;
+ num_in_hash++;
+ }
+ }
+ assert(num);
+
+		/* If none were in the hash, we print any not in the free list. */
+ if (num_in_hash == 0) {
+ unsigned int j;
+
+ for (j = i; j < i + num; j++) {
+ if (!found.arr[j].in_free) {
+ if (!walk_record(tdb, &found.arr[j],
+ walk, private_data))
+ goto oom;
+ }
+ }
+ }
+
+ i += num;
+ }
+
+ tdb->log.log_fn = oldlog;
+ if (locked) {
+ tdb_unlockall_read(tdb);
+ }
+ return 0;
+
+oom:
+ tdb->log.log_fn = oldlog;
+ tdb->ecode = TDB_ERR_OOM;
+ TDB_LOG((tdb, TDB_DEBUG_ERROR, "tdb_rescue: failed allocating\n"));
+ free_table(&found);
+ if (locked) {
+ tdb_unlockall_read(tdb);
+ }
+ return -1;
+}
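+
+/*
+ * A minimal sketch of driving tdb_rescue() with a counting callback;
+ * illustrative only, not part of the library. The example_* names are
+ * hypothetical.
+ */
+#if 0	/* illustrative only, never compiled */
+static void example_count_cb(TDB_DATA key, TDB_DATA data, void *private_data)
+{
+	size_t *count = (size_t *)private_data;
+
+	(*count)++;
+}
+
+static int example_rescue(struct tdb_context *tdb)
+{
+	size_t count = 0;
+
+	/* Hands every plausible record found in the file to the callback. */
+	return tdb_rescue(tdb, example_count_cb, &count);
+}
+#endif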
diff --git a/common/summary.c b/common/summary.c
new file mode 100644
index 0000000..d786132
--- /dev/null
+++ b/common/summary.c
@@ -0,0 +1,210 @@
+ /*
+ Trivial Database: human-readable summary code
+ Copyright (C) Rusty Russell 2010
+
+ This library is free software; you can redistribute it and/or
+ modify it under the terms of the GNU Lesser General Public
+ License as published by the Free Software Foundation; either
+ version 3 of the License, or (at your option) any later version.
+
+ This library is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ Lesser General Public License for more details.
+
+ You should have received a copy of the GNU Lesser General Public
+ License along with this library; if not, see <http://www.gnu.org/licenses/>.
+*/
+#include "tdb_private.h"
+
+#define SUMMARY_FORMAT \
+ "Size of file/data: %llu/%zu\n" \
+ "Header offset/logical size: %zu/%zu\n" \
+ "Number of records: %zu\n" \
+ "Incompatible hash: %s\n" \
+ "Active/supported feature flags: 0x%08x/0x%08x\n" \
+ "Robust mutexes locking: %s\n" \
+ "Smallest/average/largest keys: %zu/%zu/%zu\n" \
+ "Smallest/average/largest data: %zu/%zu/%zu\n" \
+ "Smallest/average/largest padding: %zu/%zu/%zu\n" \
+ "Number of dead records: %zu\n" \
+ "Smallest/average/largest dead records: %zu/%zu/%zu\n" \
+ "Number of free records: %zu\n" \
+ "Smallest/average/largest free records: %zu/%zu/%zu\n" \
+ "Number of hash chains: %zu\n" \
+ "Smallest/average/largest hash chains: %zu/%zu/%zu\n" \
+ "Number of uncoalesced records: %zu\n" \
+ "Smallest/average/largest uncoalesced runs: %zu/%zu/%zu\n" \
+ "Percentage keys/data/padding/free/dead/rechdrs&tailers/hashes: %.0f/%.0f/%.0f/%.0f/%.0f/%.0f/%.0f\n"
+
+/* We don't use the tally module, to keep upstream happy. */
+struct tally {
+ size_t min, max, total;
+ size_t num;
+};
+
+static void tally_init(struct tally *tally)
+{
+ tally->total = 0;
+ tally->num = 0;
+ tally->min = tally->max = 0;
+}
+
+static void tally_add(struct tally *tally, size_t len)
+{
+ if (tally->num == 0)
+ tally->max = tally->min = len;
+ else if (len > tally->max)
+ tally->max = len;
+ else if (len < tally->min)
+ tally->min = len;
+ tally->num++;
+ tally->total += len;
+}
+
+static size_t tally_mean(const struct tally *tally)
+{
+ if (!tally->num)
+ return 0;
+ return tally->total / tally->num;
+}
+
+static size_t get_hash_length(struct tdb_context *tdb, unsigned int i)
+{
+ tdb_off_t rec_ptr;
+ size_t count = 0;
+
+ if (tdb_ofs_read(tdb, TDB_HASH_TOP(i), &rec_ptr) == -1)
+ return 0;
+
+	/* walk the whole chain, counting the records */
+ while (rec_ptr) {
+ struct tdb_record r;
+ ++count;
+ if (tdb_rec_read(tdb, rec_ptr, &r) == -1)
+ return 0;
+ rec_ptr = r.next;
+ }
+ return count;
+}
+
+_PUBLIC_ char *tdb_summary(struct tdb_context *tdb)
+{
+ off_t file_size;
+ tdb_off_t off, rec_off;
+ struct tally freet, keys, data, dead, extra, hashval, uncoal;
+ struct tdb_record rec;
+ char *ret = NULL;
+ bool locked;
+ size_t unc = 0;
+ int len;
+ struct tdb_record recovery;
+
+ /* Read-only databases use no locking at all: it's best-effort.
+ * We may have a write lock already, so skip that case too. */
+ if (tdb->read_only || tdb->allrecord_lock.count != 0) {
+ locked = false;
+ } else {
+ if (tdb_lockall_read(tdb) == -1)
+ return NULL;
+ locked = true;
+ }
+
+ if (tdb_recovery_area(tdb, tdb->methods, &rec_off, &recovery) != 0) {
+ goto unlock;
+ }
+
+ tally_init(&freet);
+ tally_init(&keys);
+ tally_init(&data);
+ tally_init(&dead);
+ tally_init(&extra);
+ tally_init(&hashval);
+ tally_init(&uncoal);
+
+ for (off = TDB_DATA_START(tdb->hash_size);
+ off < tdb->map_size - 1;
+ off += sizeof(rec) + rec.rec_len) {
+ if (tdb->methods->tdb_read(tdb, off, &rec, sizeof(rec),
+ DOCONV()) == -1)
+ goto unlock;
+ switch (rec.magic) {
+ case TDB_MAGIC:
+ tally_add(&keys, rec.key_len);
+ tally_add(&data, rec.data_len);
+ tally_add(&extra, rec.rec_len - (rec.key_len
+ + rec.data_len));
+ if (unc > 1)
+ tally_add(&uncoal, unc - 1);
+ unc = 0;
+ break;
+ case TDB_FREE_MAGIC:
+ tally_add(&freet, rec.rec_len);
+ unc++;
+ break;
+ /* If we crash after ftruncate, we can get zeroes or fill. */
+ case TDB_RECOVERY_INVALID_MAGIC:
+ case 0x42424242:
+ unc++;
+ /* If it's a valid recovery, we can trust rec_len. */
+ if (off != rec_off) {
+ rec.rec_len = tdb_dead_space(tdb, off)
+ - sizeof(rec);
+ }
+ /* Fall through */
+ case TDB_DEAD_MAGIC:
+ tally_add(&dead, rec.rec_len);
+ break;
+ default:
+ TDB_LOG((tdb, TDB_DEBUG_ERROR,
+ "Unexpected record magic 0x%x at offset %u\n",
+ rec.magic, off));
+ goto unlock;
+ }
+ }
+ if (unc > 1)
+ tally_add(&uncoal, unc - 1);
+
+ for (off = 0; off < tdb->hash_size; off++)
+ tally_add(&hashval, get_hash_length(tdb, off));
+
+ file_size = tdb->hdr_ofs + tdb->map_size;
+
+ len = asprintf(&ret, SUMMARY_FORMAT,
+ (unsigned long long)file_size, keys.total+data.total,
+ (size_t)tdb->hdr_ofs, (size_t)tdb->map_size,
+ keys.num,
+ (tdb->hash_fn == tdb_jenkins_hash)?"yes":"no",
+ (unsigned)tdb->feature_flags, TDB_SUPPORTED_FEATURE_FLAGS,
+ (tdb->feature_flags & TDB_FEATURE_FLAG_MUTEX)?"yes":"no",
+ keys.min, tally_mean(&keys), keys.max,
+ data.min, tally_mean(&data), data.max,
+ extra.min, tally_mean(&extra), extra.max,
+ dead.num,
+ dead.min, tally_mean(&dead), dead.max,
+ freet.num,
+ freet.min, tally_mean(&freet), freet.max,
+ hashval.num,
+ hashval.min, tally_mean(&hashval), hashval.max,
+ uncoal.total,
+ uncoal.min, tally_mean(&uncoal), uncoal.max,
+ keys.total * 100.0 / file_size,
+ data.total * 100.0 / file_size,
+ extra.total * 100.0 / file_size,
+ freet.total * 100.0 / file_size,
+ dead.total * 100.0 / file_size,
+ (keys.num + freet.num + dead.num)
+ * (sizeof(struct tdb_record) + sizeof(uint32_t))
+ * 100.0 / file_size,
+ tdb->hash_size * sizeof(tdb_off_t)
+ * 100.0 / file_size);
+ if (len == -1) {
+ goto unlock;
+ }
+
+unlock:
+ if (locked) {
+ tdb_unlockall_read(tdb);
+ }
+ return ret;
+}
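+
+/*
+ * A minimal sketch, illustrative only and not part of the library:
+ * tdb_summary() returns a string allocated with asprintf(), so the
+ * caller must free() it.
+ */
+#if 0	/* illustrative only, never compiled */
+static void example_print_summary(struct tdb_context *tdb)
+{
+	char *s = tdb_summary(tdb);
+
+	if (s != NULL) {
+		fputs(s, stdout);
+		free(s);
+	}
+}
+#endif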
diff --git a/common/tdb.c b/common/tdb.c
new file mode 100644
index 0000000..9885d8c
--- /dev/null
+++ b/common/tdb.c
@@ -0,0 +1,1141 @@
+ /*
+ Unix SMB/CIFS implementation.
+
+ trivial database library
+
+ Copyright (C) Andrew Tridgell 1999-2005
+ Copyright (C) Paul `Rusty' Russell 2000
+ Copyright (C) Jeremy Allison 2000-2003
+
+ ** NOTE! The following LGPL license applies to the tdb
+ ** library. This does NOT imply that all of Samba is released
+ ** under the LGPL
+
+ This library is free software; you can redistribute it and/or
+ modify it under the terms of the GNU Lesser General Public
+ License as published by the Free Software Foundation; either
+ version 3 of the License, or (at your option) any later version.
+
+ This library is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ Lesser General Public License for more details.
+
+ You should have received a copy of the GNU Lesser General Public
+ License along with this library; if not, see <http://www.gnu.org/licenses/>.
+*/
+
+#include "tdb_private.h"
+
+_PUBLIC_ TDB_DATA tdb_null;
+
+/*
+ non-blocking increment of the tdb sequence number if the tdb has been opened using
+ the TDB_SEQNUM flag
+*/
+_PUBLIC_ void tdb_increment_seqnum_nonblock(struct tdb_context *tdb)
+{
+ tdb_off_t seqnum=0;
+
+ if (!(tdb->flags & TDB_SEQNUM)) {
+ return;
+ }
+
+ /* we ignore errors from this, as we have no sane way of
+ dealing with them.
+ */
+ tdb_ofs_read(tdb, TDB_SEQNUM_OFS, &seqnum);
+ seqnum++;
+ tdb_ofs_write(tdb, TDB_SEQNUM_OFS, &seqnum);
+}
+
+/*
+ increment the tdb sequence number if the tdb has been opened using
+ the TDB_SEQNUM flag
+*/
+static void tdb_increment_seqnum(struct tdb_context *tdb)
+{
+ if (!(tdb->flags & TDB_SEQNUM)) {
+ return;
+ }
+
+ if (tdb_nest_lock(tdb, TDB_SEQNUM_OFS, F_WRLCK,
+ TDB_LOCK_WAIT|TDB_LOCK_PROBE) != 0) {
+ return;
+ }
+
+ tdb_increment_seqnum_nonblock(tdb);
+
+ tdb_nest_unlock(tdb, TDB_SEQNUM_OFS, F_WRLCK, false);
+}
+
+static int tdb_key_compare(TDB_DATA key, TDB_DATA data, void *private_data)
+{
+ return memcmp(data.dptr, key.dptr, data.dsize);
+}
+
+/* Returns 0 on fail. On success, return offset of record, and fills
+ in rec */
+static tdb_off_t tdb_find(struct tdb_context *tdb, TDB_DATA key, uint32_t hash,
+ struct tdb_record *r)
+{
+ tdb_off_t rec_ptr;
+
+ /* read in the hash top */
+ if (tdb_ofs_read(tdb, TDB_HASH_TOP(hash), &rec_ptr) == -1)
+ return 0;
+
+ /* keep looking until we find the right record */
+ while (rec_ptr) {
+ if (tdb_rec_read(tdb, rec_ptr, r) == -1)
+ return 0;
+
+ if (!TDB_DEAD(r) && hash==r->full_hash
+ && key.dsize==r->key_len
+ && tdb_parse_data(tdb, key, rec_ptr + sizeof(*r),
+ r->key_len, tdb_key_compare,
+ NULL) == 0) {
+ return rec_ptr;
+ }
+ /* detect tight infinite loop */
+ if (rec_ptr == r->next) {
+ tdb->ecode = TDB_ERR_CORRUPT;
+ TDB_LOG((tdb, TDB_DEBUG_FATAL, "tdb_find: loop detected.\n"));
+ return 0;
+ }
+ rec_ptr = r->next;
+ }
+ tdb->ecode = TDB_ERR_NOEXIST;
+ return 0;
+}
+
+/* As tdb_find, but if you succeed, keep the lock */
+tdb_off_t tdb_find_lock_hash(struct tdb_context *tdb, TDB_DATA key, uint32_t hash, int locktype,
+ struct tdb_record *rec)
+{
+ uint32_t rec_ptr;
+
+ if (tdb_lock(tdb, BUCKET(hash), locktype) == -1)
+ return 0;
+ if (!(rec_ptr = tdb_find(tdb, key, hash, rec)))
+ tdb_unlock(tdb, BUCKET(hash), locktype);
+ return rec_ptr;
+}
+
+static TDB_DATA _tdb_fetch(struct tdb_context *tdb, TDB_DATA key);
+
+static int tdb_update_hash_cmp(TDB_DATA key, TDB_DATA data, void *private_data)
+{
+ TDB_DATA *dbuf = (TDB_DATA *)private_data;
+
+ if (dbuf->dsize != data.dsize) {
+ return -1;
+ }
+ if (memcmp(dbuf->dptr, data.dptr, data.dsize) != 0) {
+ return -1;
+ }
+ return 0;
+}
+
+/* update an entry in place - this only works if the new data size
+ is <= the old data size and the key exists.
+ on failure return -1.
+*/
+static int tdb_update_hash(struct tdb_context *tdb, TDB_DATA key, uint32_t hash, TDB_DATA dbuf)
+{
+ struct tdb_record rec;
+ tdb_off_t rec_ptr;
+
+ /* find entry */
+ if (!(rec_ptr = tdb_find(tdb, key, hash, &rec)))
+ return -1;
+
+ /* it could be an exact duplicate of what is there - this is
+	 * surprisingly common (e.g. with an ldb re-index). */
+ if (rec.key_len == key.dsize &&
+ rec.data_len == dbuf.dsize &&
+ rec.full_hash == hash &&
+ tdb_parse_record(tdb, key, tdb_update_hash_cmp, &dbuf) == 0) {
+ return 0;
+ }
+
+	/* record must be long enough for the key, data and tailer */
+ if (rec.rec_len < key.dsize + dbuf.dsize + sizeof(tdb_off_t)) {
+ tdb->ecode = TDB_SUCCESS; /* Not really an error */
+ return -1;
+ }
+
+ if (tdb->methods->tdb_write(tdb, rec_ptr + sizeof(rec) + rec.key_len,
+ dbuf.dptr, dbuf.dsize) == -1)
+ return -1;
+
+ if (dbuf.dsize != rec.data_len) {
+ /* update size */
+ rec.data_len = dbuf.dsize;
+ return tdb_rec_write(tdb, rec_ptr, &rec);
+ }
+
+ return 0;
+}
+
+/* find an entry in the database given a key */
+/* If an entry doesn't exist, the tdb error code will be set to
+ * TDB_ERR_NOEXIST. If a key has no data attached
+ * then the TDB_DATA will have zero length but
+ * a non-zero pointer
+ */
+static TDB_DATA _tdb_fetch(struct tdb_context *tdb, TDB_DATA key)
+{
+ tdb_off_t rec_ptr;
+ struct tdb_record rec;
+ TDB_DATA ret;
+ uint32_t hash;
+
+ /* find which hash bucket it is in */
+ hash = tdb->hash_fn(&key);
+ if (!(rec_ptr = tdb_find_lock_hash(tdb,key,hash,F_RDLCK,&rec)))
+ return tdb_null;
+
+ ret.dptr = tdb_alloc_read(tdb, rec_ptr + sizeof(rec) + rec.key_len,
+ rec.data_len);
+ ret.dsize = rec.data_len;
+ tdb_unlock(tdb, BUCKET(rec.full_hash), F_RDLCK);
+ return ret;
+}
+
+_PUBLIC_ TDB_DATA tdb_fetch(struct tdb_context *tdb, TDB_DATA key)
+{
+ TDB_DATA ret = _tdb_fetch(tdb, key);
+
+ tdb_trace_1rec_retrec(tdb, "tdb_fetch", key, ret);
+ return ret;
+}
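+
+/*
+ * A minimal sketch, illustrative only and not part of the library:
+ * tdb_fetch() returns a malloc()ed copy of the data, so the caller owns
+ * val.dptr and must free() it; a NULL dptr means the key was not found.
+ */
+#if 0	/* illustrative only, never compiled */
+static void example_fetch(struct tdb_context *tdb)
+{
+	TDB_DATA key = { (unsigned char *)"mykey", 5 };
+	TDB_DATA val = tdb_fetch(tdb, key);
+
+	if (val.dptr != NULL) {
+		/* use val.dptr / val.dsize here ... */
+		free(val.dptr);
+	}
+}
+#endif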
+
+/*
+ * Find an entry in the database and hand the record's data to a parsing
+ * function. The parsing function is executed under the chain read lock, so it
+ * should be fast and should not block on other syscalls.
+ *
+ * DON'T CALL OTHER TDB CALLS FROM THE PARSER, THIS MIGHT LEAD TO SEGFAULTS.
+ *
+ * For mmapped tdb's that do not have a transaction open, it points the parsing
+ * function directly at the mmap area, avoiding the malloc/memcpy in this
+ * case. If a transaction is open or no mmap is available, it has to do
+ * malloc/read/parse/free.
+ *
+ * This is interesting for all readers of potentially large data structures in
+ * the tdb records, ldb indexes being one example.
+ *
+ * Return -1 if the record was not found.
+ */
+
+_PUBLIC_ int tdb_parse_record(struct tdb_context *tdb, TDB_DATA key,
+ int (*parser)(TDB_DATA key, TDB_DATA data,
+ void *private_data),
+ void *private_data)
+{
+ tdb_off_t rec_ptr;
+ struct tdb_record rec;
+ int ret;
+ uint32_t hash;
+
+ /* find which hash bucket it is in */
+ hash = tdb->hash_fn(&key);
+
+ if (!(rec_ptr = tdb_find_lock_hash(tdb,key,hash,F_RDLCK,&rec))) {
+ /* record not found */
+ tdb_trace_1rec_ret(tdb, "tdb_parse_record", key, -1);
+ tdb->ecode = TDB_ERR_NOEXIST;
+ return -1;
+ }
+ tdb_trace_1rec_ret(tdb, "tdb_parse_record", key, 0);
+
+ ret = tdb_parse_data(tdb, key, rec_ptr + sizeof(rec) + rec.key_len,
+ rec.data_len, parser, private_data);
+
+ tdb_unlock(tdb, BUCKET(rec.full_hash), F_RDLCK);
+
+ return ret;
+}
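+
+/*
+ * A minimal parser sketch, illustrative only and not part of the
+ * library. The parser may be pointed straight at the mmap area, so it
+ * copies out what it needs and must not keep data.dptr after returning.
+ */
+#if 0	/* illustrative only, never compiled */
+static int example_first_byte(TDB_DATA key, TDB_DATA data, void *private_data)
+{
+	unsigned char *out = (unsigned char *)private_data;
+
+	if (data.dsize < 1) {
+		return -1;
+	}
+	*out = data.dptr[0];	/* copy while the chain read lock is held */
+	return 0;
+}
+#endif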
+
+/* check if an entry in the database exists
+
+   note that 1 is returned if the key is found and 0 is returned if not found.
+ this doesn't match the conventions in the rest of this module, but is
+ compatible with gdbm
+*/
+static int tdb_exists_hash(struct tdb_context *tdb, TDB_DATA key, uint32_t hash)
+{
+ struct tdb_record rec;
+
+ if (tdb_find_lock_hash(tdb, key, hash, F_RDLCK, &rec) == 0)
+ return 0;
+ tdb_unlock(tdb, BUCKET(rec.full_hash), F_RDLCK);
+ return 1;
+}
+
+_PUBLIC_ int tdb_exists(struct tdb_context *tdb, TDB_DATA key)
+{
+ uint32_t hash = tdb->hash_fn(&key);
+ int ret;
+
+ ret = tdb_exists_hash(tdb, key, hash);
+ tdb_trace_1rec_ret(tdb, "tdb_exists", key, ret);
+ return ret;
+}
+
+/* actually delete an entry in the database given the offset */
+int tdb_do_delete(struct tdb_context *tdb, tdb_off_t rec_ptr, struct tdb_record *rec)
+{
+ tdb_off_t last_ptr, i;
+ struct tdb_record lastrec;
+
+ if (tdb->read_only || tdb->traverse_read) return -1;
+
+ if (((tdb->traverse_write != 0) && (!TDB_DEAD(rec))) ||
+ tdb_write_lock_record(tdb, rec_ptr) == -1) {
+ /* Someone traversing here: mark it as dead */
+ rec->magic = TDB_DEAD_MAGIC;
+ return tdb_rec_write(tdb, rec_ptr, rec);
+ }
+ if (tdb_write_unlock_record(tdb, rec_ptr) != 0)
+ return -1;
+
+ /* find previous record in hash chain */
+ if (tdb_ofs_read(tdb, TDB_HASH_TOP(rec->full_hash), &i) == -1)
+ return -1;
+ for (last_ptr = 0; i != rec_ptr; last_ptr = i, i = lastrec.next)
+ if (tdb_rec_read(tdb, i, &lastrec) == -1)
+ return -1;
+
+ /* unlink it: next ptr is at start of record. */
+ if (last_ptr == 0)
+ last_ptr = TDB_HASH_TOP(rec->full_hash);
+ if (tdb_ofs_write(tdb, last_ptr, &rec->next) == -1)
+ return -1;
+
+ /* recover the space */
+ if (tdb_free(tdb, rec_ptr, rec) == -1)
+ return -1;
+ return 0;
+}
+
+static int tdb_count_dead(struct tdb_context *tdb, uint32_t hash)
+{
+ int res = 0;
+ tdb_off_t rec_ptr;
+ struct tdb_record rec;
+
+ /* read in the hash top */
+ if (tdb_ofs_read(tdb, TDB_HASH_TOP(hash), &rec_ptr) == -1)
+ return 0;
+
+ while (rec_ptr) {
+ if (tdb_rec_read(tdb, rec_ptr, &rec) == -1)
+ return 0;
+
+ if (rec.magic == TDB_DEAD_MAGIC) {
+ res += 1;
+ }
+ rec_ptr = rec.next;
+ }
+ return res;
+}
+
+/*
+ * Purge all DEAD records from a hash chain
+ */
+int tdb_purge_dead(struct tdb_context *tdb, uint32_t hash)
+{
+ int res = -1;
+ struct tdb_record rec;
+ tdb_off_t rec_ptr;
+
+ if (tdb_lock_nonblock(tdb, -1, F_WRLCK) == -1) {
+ /*
+ * Don't block the freelist if not strictly necessary
+ */
+ return -1;
+ }
+
+ /* read in the hash top */
+ if (tdb_ofs_read(tdb, TDB_HASH_TOP(hash), &rec_ptr) == -1)
+ goto fail;
+
+ while (rec_ptr) {
+ tdb_off_t next;
+
+ if (tdb_rec_read(tdb, rec_ptr, &rec) == -1) {
+ goto fail;
+ }
+
+ next = rec.next;
+
+ if (rec.magic == TDB_DEAD_MAGIC
+ && tdb_do_delete(tdb, rec_ptr, &rec) == -1) {
+ goto fail;
+ }
+ rec_ptr = next;
+ }
+ res = 0;
+ fail:
+ tdb_unlock(tdb, -1, F_WRLCK);
+ return res;
+}
+
+/* delete an entry in the database given a key */
+static int tdb_delete_hash(struct tdb_context *tdb, TDB_DATA key, uint32_t hash)
+{
+ tdb_off_t rec_ptr;
+ struct tdb_record rec;
+ int ret;
+
+ rec_ptr = tdb_find_lock_hash(tdb, key, hash, F_WRLCK, &rec);
+ if (rec_ptr == 0) {
+ return -1;
+ }
+
+ if (tdb->max_dead_records != 0) {
+
+ uint32_t magic = TDB_DEAD_MAGIC;
+
+ /*
+ * Allow for some dead records per hash chain, mainly for
+ * tdb's with a very high create/delete rate like locking.tdb.
+ */
+
+ if (tdb_count_dead(tdb, hash) >= tdb->max_dead_records) {
+ /*
+ * Don't let the per-chain freelist grow too large,
+ * delete all existing dead records
+ */
+ tdb_purge_dead(tdb, hash);
+ }
+
+ /*
+ * Just mark the record as dead.
+ */
+ ret = tdb_ofs_write(
+ tdb, rec_ptr + offsetof(struct tdb_record, magic),
+ &magic);
+ }
+ else {
+ ret = tdb_do_delete(tdb, rec_ptr, &rec);
+ }
+
+ if (ret == 0) {
+ tdb_increment_seqnum(tdb);
+ }
+
+ if (tdb_unlock(tdb, BUCKET(hash), F_WRLCK) != 0)
+ TDB_LOG((tdb, TDB_DEBUG_WARNING, "tdb_delete: WARNING tdb_unlock failed!\n"));
+ return ret;
+}
+
+_PUBLIC_ int tdb_delete(struct tdb_context *tdb, TDB_DATA key)
+{
+ uint32_t hash = tdb->hash_fn(&key);
+ int ret;
+
+ ret = tdb_delete_hash(tdb, key, hash);
+ tdb_trace_1rec_ret(tdb, "tdb_delete", key, ret);
+ return ret;
+}
+
+/*
+ * See if we have a dead record around with enough space
+ */
+tdb_off_t tdb_find_dead(struct tdb_context *tdb, uint32_t hash,
+ struct tdb_record *r, tdb_len_t length,
+ tdb_off_t *p_last_ptr)
+{
+ tdb_off_t rec_ptr, last_ptr;
+ tdb_off_t best_rec_ptr = 0;
+ tdb_off_t best_last_ptr = 0;
+ struct tdb_record best = { .rec_len = UINT32_MAX };
+
+ length += sizeof(tdb_off_t); /* tailer */
+
+ last_ptr = TDB_HASH_TOP(hash);
+
+ /* read in the hash top */
+ if (tdb_ofs_read(tdb, last_ptr, &rec_ptr) == -1)
+ return 0;
+
+ /* keep looking until we find the right record */
+ while (rec_ptr) {
+ if (tdb_rec_read(tdb, rec_ptr, r) == -1)
+ return 0;
+
+ if (TDB_DEAD(r) && (r->rec_len >= length) &&
+ (r->rec_len < best.rec_len)) {
+ best_rec_ptr = rec_ptr;
+ best_last_ptr = last_ptr;
+ best = *r;
+ }
+ last_ptr = rec_ptr;
+ rec_ptr = r->next;
+ }
+
+ if (best.rec_len == UINT32_MAX) {
+ return 0;
+ }
+
+ *r = best;
+ *p_last_ptr = best_last_ptr;
+ return best_rec_ptr;
+}
+
+static int _tdb_store(struct tdb_context *tdb, TDB_DATA key,
+ TDB_DATA dbuf, int flag, uint32_t hash)
+{
+ struct tdb_record rec;
+ tdb_off_t rec_ptr;
+ int ret = -1;
+
+ /* check for it existing, on insert. */
+ if (flag == TDB_INSERT) {
+ if (tdb_exists_hash(tdb, key, hash)) {
+ tdb->ecode = TDB_ERR_EXISTS;
+ goto fail;
+ }
+ } else {
+ /* first try in-place update, on modify or replace. */
+ if (tdb_update_hash(tdb, key, hash, dbuf) == 0) {
+ goto done;
+ }
+ if (tdb->ecode == TDB_ERR_NOEXIST &&
+ flag == TDB_MODIFY) {
+ /* if the record doesn't exist and we are in TDB_MODIFY mode then
+ we should fail the store */
+ goto fail;
+ }
+ }
+	/* reset the error code potentially set by tdb_update_hash() */
+ tdb->ecode = TDB_SUCCESS;
+
+ /* delete any existing record - if it doesn't exist we don't
+ care. Doing this first reduces fragmentation, and avoids
+ coalescing with `allocated' block before it's updated. */
+ if (flag != TDB_INSERT)
+ tdb_delete_hash(tdb, key, hash);
+
+ /* we have to allocate some space */
+ rec_ptr = tdb_allocate(tdb, hash, key.dsize + dbuf.dsize, &rec);
+
+ if (rec_ptr == 0) {
+ goto fail;
+ }
+
+ /* Read hash top into next ptr */
+ if (tdb_ofs_read(tdb, TDB_HASH_TOP(hash), &rec.next) == -1)
+ goto fail;
+
+ rec.key_len = key.dsize;
+ rec.data_len = dbuf.dsize;
+ rec.full_hash = hash;
+ rec.magic = TDB_MAGIC;
+
+ /* write out and point the top of the hash chain at it */
+ if (tdb_rec_write(tdb, rec_ptr, &rec) == -1
+ || tdb->methods->tdb_write(tdb, rec_ptr+sizeof(rec),
+ key.dptr, key.dsize) == -1
+ || tdb->methods->tdb_write(tdb, rec_ptr+sizeof(rec)+key.dsize,
+ dbuf.dptr, dbuf.dsize) == -1
+ || tdb_ofs_write(tdb, TDB_HASH_TOP(hash), &rec_ptr) == -1) {
+ /* Need to tdb_unallocate() here */
+ goto fail;
+ }
+
+ done:
+ ret = 0;
+ fail:
+ if (ret == 0) {
+ tdb_increment_seqnum(tdb);
+ }
+ return ret;
+}
+
+/* store an element in the database, replacing any existing element
+ with the same key
+
+ return 0 on success, -1 on failure
+*/
+_PUBLIC_ int tdb_store(struct tdb_context *tdb, TDB_DATA key, TDB_DATA dbuf, int flag)
+{
+ uint32_t hash;
+ int ret;
+
+ if (tdb->read_only || tdb->traverse_read) {
+ tdb->ecode = TDB_ERR_RDONLY;
+ tdb_trace_2rec_flag_ret(tdb, "tdb_store", key, dbuf, flag, -1);
+ return -1;
+ }
+
+ /* find which hash bucket it is in */
+ hash = tdb->hash_fn(&key);
+ if (tdb_lock(tdb, BUCKET(hash), F_WRLCK) == -1)
+ return -1;
+
+ ret = _tdb_store(tdb, key, dbuf, flag, hash);
+ tdb_trace_2rec_flag_ret(tdb, "tdb_store", key, dbuf, flag, ret);
+ tdb_unlock(tdb, BUCKET(hash), F_WRLCK);
+ return ret;
+}
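+
+/*
+ * A minimal sketch of the flag semantics, illustrative only and not
+ * part of the library: TDB_INSERT fails on an existing key, TDB_MODIFY
+ * fails on a missing one, and 0 (TDB_REPLACE) stores unconditionally.
+ */
+#if 0	/* illustrative only, never compiled */
+static int example_insert_once(struct tdb_context *tdb,
+			       TDB_DATA key, TDB_DATA val)
+{
+	if (tdb_store(tdb, key, val, TDB_INSERT) == -1) {
+		if (tdb_error(tdb) == TDB_ERR_EXISTS) {
+			return 0;	/* lost the race: already stored */
+		}
+		return -1;
+	}
+	return 0;
+}
+#endif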
+
+/* Append to an entry. Create if not exist. */
+_PUBLIC_ int tdb_append(struct tdb_context *tdb, TDB_DATA key, TDB_DATA new_dbuf)
+{
+ uint32_t hash;
+ TDB_DATA dbuf;
+ int ret = -1;
+
+ /* find which hash bucket it is in */
+ hash = tdb->hash_fn(&key);
+ if (tdb_lock(tdb, BUCKET(hash), F_WRLCK) == -1)
+ return -1;
+
+ dbuf = _tdb_fetch(tdb, key);
+
+ if (dbuf.dptr == NULL) {
+ dbuf.dptr = (unsigned char *)malloc(new_dbuf.dsize);
+ } else {
+ unsigned int new_len = dbuf.dsize + new_dbuf.dsize;
+ unsigned char *new_dptr;
+
+ /* realloc '0' is special: don't do that. */
+ if (new_len == 0)
+ new_len = 1;
+ new_dptr = (unsigned char *)realloc(dbuf.dptr, new_len);
+ if (new_dptr == NULL) {
+ free(dbuf.dptr);
+ }
+ dbuf.dptr = new_dptr;
+ }
+
+ if (dbuf.dptr == NULL) {
+ tdb->ecode = TDB_ERR_OOM;
+ goto failed;
+ }
+
+ memcpy(dbuf.dptr + dbuf.dsize, new_dbuf.dptr, new_dbuf.dsize);
+ dbuf.dsize += new_dbuf.dsize;
+
+ ret = _tdb_store(tdb, key, dbuf, 0, hash);
+ tdb_trace_2rec_retrec(tdb, "tdb_append", key, new_dbuf, dbuf);
+
+failed:
+ tdb_unlock(tdb, BUCKET(hash), F_WRLCK);
+ SAFE_FREE(dbuf.dptr);
+ return ret;
+}
+
+
+/*
+ return the name of the current tdb file
+ useful for external logging functions
+*/
+_PUBLIC_ const char *tdb_name(struct tdb_context *tdb)
+{
+ return tdb->name;
+}
+
+/*
+ return the underlying file descriptor being used by tdb, or -1
+ useful for external routines that want to check the device/inode
+ of the fd
+*/
+_PUBLIC_ int tdb_fd(struct tdb_context *tdb)
+{
+ return tdb->fd;
+}
+
+/*
+ return the current logging function
+ useful for external tdb routines that wish to log tdb errors
+*/
+_PUBLIC_ tdb_log_func tdb_log_fn(struct tdb_context *tdb)
+{
+ return tdb->log.log_fn;
+}
+
+
+/*
+ get the tdb sequence number. Only makes sense if the writers opened
+ with TDB_SEQNUM set. Note that this sequence number will wrap quite
+ quickly, so it should only be used for a 'has something changed'
+ test, not for code that relies on the count of the number of changes
+ made. If you want a counter then use a tdb record.
+
+ The aim of this sequence number is to allow for a very lightweight
+ test of a possible tdb change.
+*/
+_PUBLIC_ int tdb_get_seqnum(struct tdb_context *tdb)
+{
+ tdb_off_t seqnum=0;
+
+ tdb_ofs_read(tdb, TDB_SEQNUM_OFS, &seqnum);
+ return seqnum;
+}
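+
+/*
+ * A minimal change-detection sketch in the spirit of the comment above,
+ * illustrative only and not part of the library: compare seqnums, don't
+ * count on them.
+ */
+#if 0	/* illustrative only, never compiled */
+static bool example_changed(struct tdb_context *tdb, int *last)
+{
+	int now = tdb_get_seqnum(tdb);
+
+	if (now != *last) {
+		*last = now;
+		return true;	/* something may have changed (or wrapped) */
+	}
+	return false;
+}
+#endif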
+
+_PUBLIC_ int tdb_hash_size(struct tdb_context *tdb)
+{
+ return tdb->hash_size;
+}
+
+_PUBLIC_ size_t tdb_map_size(struct tdb_context *tdb)
+{
+ return tdb->map_size;
+}
+
+_PUBLIC_ int tdb_get_flags(struct tdb_context *tdb)
+{
+ return tdb->flags;
+}
+
+_PUBLIC_ void tdb_add_flags(struct tdb_context *tdb, unsigned flags)
+{
+ if ((flags & TDB_ALLOW_NESTING) &&
+ (flags & TDB_DISALLOW_NESTING)) {
+ tdb->ecode = TDB_ERR_NESTING;
+ TDB_LOG((tdb, TDB_DEBUG_FATAL, "tdb_add_flags: "
+ "allow_nesting and disallow_nesting are not allowed together!"));
+ return;
+ }
+
+ if (flags & TDB_ALLOW_NESTING) {
+ tdb->flags &= ~TDB_DISALLOW_NESTING;
+ }
+ if (flags & TDB_DISALLOW_NESTING) {
+ tdb->flags &= ~TDB_ALLOW_NESTING;
+ }
+
+ tdb->flags |= flags;
+}
+
+_PUBLIC_ void tdb_remove_flags(struct tdb_context *tdb, unsigned flags)
+{
+ if ((flags & TDB_ALLOW_NESTING) &&
+ (flags & TDB_DISALLOW_NESTING)) {
+ tdb->ecode = TDB_ERR_NESTING;
+ TDB_LOG((tdb, TDB_DEBUG_FATAL, "tdb_remove_flags: "
+ "allow_nesting and disallow_nesting are not allowed together!"));
+ return;
+ }
+
+ if ((flags & TDB_NOLOCK) &&
+ (tdb->feature_flags & TDB_FEATURE_FLAG_MUTEX) &&
+ (tdb->mutexes == NULL)) {
+ tdb->ecode = TDB_ERR_LOCK;
+ TDB_LOG((tdb, TDB_DEBUG_FATAL, "tdb_remove_flags: "
+			 "Cannot remove the NOLOCK flag on mutexed databases"));
+ return;
+ }
+
+ if (flags & TDB_ALLOW_NESTING) {
+ tdb->flags |= TDB_DISALLOW_NESTING;
+ }
+ if (flags & TDB_DISALLOW_NESTING) {
+ tdb->flags |= TDB_ALLOW_NESTING;
+ }
+
+ tdb->flags &= ~flags;
+}
+
+
+/*
+ enable sequence number handling on an open tdb
+*/
+_PUBLIC_ void tdb_enable_seqnum(struct tdb_context *tdb)
+{
+ tdb->flags |= TDB_SEQNUM;
+}
+
+
+/*
+ add a region of the file to the freelist. Length is the size of the region in bytes,
+ which includes the free list header that needs to be added
+ */
+static int tdb_free_region(struct tdb_context *tdb, tdb_off_t offset, ssize_t length)
+{
+ struct tdb_record rec;
+ if (length <= sizeof(rec)) {
+ /* the region is not worth adding */
+ return 0;
+ }
+ if (length + offset > tdb->map_size) {
+ TDB_LOG((tdb, TDB_DEBUG_FATAL,"tdb_free_region: adding region beyond end of file\n"));
+ return -1;
+ }
+ memset(&rec,'\0',sizeof(rec));
+ rec.rec_len = length - sizeof(rec);
+ if (tdb_free(tdb, offset, &rec) == -1) {
+ TDB_LOG((tdb, TDB_DEBUG_FATAL,"tdb_free_region: failed to add free record\n"));
+ return -1;
+ }
+ return 0;
+}
+
+/*
+ wipe the entire database, deleting all records. This can be done
+  very fast by using an allrecord lock. The entire data portion of the
+ file becomes a single entry in the freelist.
+
+ This code carefully steps around the recovery area, leaving it alone
+ */
+_PUBLIC_ int tdb_wipe_all(struct tdb_context *tdb)
+{
+ int i;
+ tdb_off_t offset = 0;
+ ssize_t data_len;
+ tdb_off_t recovery_head;
+ tdb_len_t recovery_size = 0;
+
+ if (tdb_lockall(tdb) != 0) {
+ return -1;
+ }
+
+ tdb_trace(tdb, "tdb_wipe_all");
+
+ /* see if the tdb has a recovery area, and remember its size
+	   if so. We don't want to lose this, as otherwise each
+ tdb_wipe_all() in a transaction will increase the size of
+ the tdb by the size of the recovery area */
+ if (tdb_ofs_read(tdb, TDB_RECOVERY_HEAD, &recovery_head) == -1) {
+ TDB_LOG((tdb, TDB_DEBUG_FATAL, "tdb_wipe_all: failed to read recovery head\n"));
+ goto failed;
+ }
+
+ if (recovery_head != 0) {
+ struct tdb_record rec;
+ if (tdb->methods->tdb_read(tdb, recovery_head, &rec, sizeof(rec), DOCONV()) == -1) {
+ TDB_LOG((tdb, TDB_DEBUG_FATAL, "tdb_wipe_all: failed to read recovery record\n"));
+ return -1;
+ }
+ recovery_size = rec.rec_len + sizeof(rec);
+ }
+
+ /* wipe the hashes */
+ for (i=0;i<tdb->hash_size;i++) {
+ if (tdb_ofs_write(tdb, TDB_HASH_TOP(i), &offset) == -1) {
+ TDB_LOG((tdb, TDB_DEBUG_FATAL,"tdb_wipe_all: failed to write hash %d\n", i));
+ goto failed;
+ }
+ }
+
+ /* wipe the freelist */
+ if (tdb_ofs_write(tdb, FREELIST_TOP, &offset) == -1) {
+ TDB_LOG((tdb, TDB_DEBUG_FATAL,"tdb_wipe_all: failed to write freelist\n"));
+ goto failed;
+ }
+
+ /* add all the rest of the file to the freelist, possibly leaving a gap
+ for the recovery area */
+ if (recovery_size == 0) {
+ /* the simple case - the whole file can be used as a freelist */
+ data_len = (tdb->map_size - TDB_DATA_START(tdb->hash_size));
+ if (tdb_free_region(tdb, TDB_DATA_START(tdb->hash_size), data_len) != 0) {
+ goto failed;
+ }
+ } else {
+ /* we need to add two freelist entries - one on either
+ side of the recovery area
+
+ Note that we cannot shift the recovery area during
+ this operation. Only the transaction.c code may
+		   move the recovery area, or we risk subtle data
+ corruption
+ */
+ data_len = (recovery_head - TDB_DATA_START(tdb->hash_size));
+ if (tdb_free_region(tdb, TDB_DATA_START(tdb->hash_size), data_len) != 0) {
+ goto failed;
+ }
+ /* and the 2nd free list entry after the recovery area - if any */
+ data_len = tdb->map_size - (recovery_head+recovery_size);
+ if (tdb_free_region(tdb, recovery_head+recovery_size, data_len) != 0) {
+ goto failed;
+ }
+ }
+
+ tdb_increment_seqnum_nonblock(tdb);
+
+ if (tdb_unlockall(tdb) != 0) {
+ TDB_LOG((tdb, TDB_DEBUG_FATAL,"tdb_wipe_all: failed to unlock\n"));
+ goto failed;
+ }
+
+ return 0;
+
+failed:
+ tdb_unlockall(tdb);
+ return -1;
+}
+
+struct traverse_state {
+ bool error;
+ struct tdb_context *dest_db;
+};
+
+/*
+ traverse function for repacking
+ */
+static int repack_traverse(struct tdb_context *tdb, TDB_DATA key, TDB_DATA data, void *private_data)
+{
+ struct traverse_state *state = (struct traverse_state *)private_data;
+ if (tdb_store(state->dest_db, key, data, TDB_INSERT) != 0) {
+ state->error = true;
+ return -1;
+ }
+ return 0;
+}
+
+/*
+ repack a tdb
+ */
+_PUBLIC_ int tdb_repack(struct tdb_context *tdb)
+{
+ struct tdb_context *tmp_db;
+ struct traverse_state state;
+
+ tdb_trace(tdb, "tdb_repack");
+
+ if (tdb_transaction_start(tdb) != 0) {
+ TDB_LOG((tdb, TDB_DEBUG_FATAL, __location__ " Failed to start transaction\n"));
+ return -1;
+ }
+
+ tmp_db = tdb_open("tmpdb", tdb_hash_size(tdb), TDB_INTERNAL, O_RDWR|O_CREAT, 0);
+ if (tmp_db == NULL) {
+ TDB_LOG((tdb, TDB_DEBUG_FATAL, __location__ " Failed to create tmp_db\n"));
+ tdb_transaction_cancel(tdb);
+ return -1;
+ }
+
+ state.error = false;
+ state.dest_db = tmp_db;
+
+ if (tdb_traverse_read(tdb, repack_traverse, &state) == -1) {
+ TDB_LOG((tdb, TDB_DEBUG_FATAL, __location__ " Failed to traverse copying out\n"));
+ tdb_transaction_cancel(tdb);
+ tdb_close(tmp_db);
+ return -1;
+ }
+
+ if (state.error) {
+ TDB_LOG((tdb, TDB_DEBUG_FATAL, __location__ " Error during traversal\n"));
+ tdb_transaction_cancel(tdb);
+ tdb_close(tmp_db);
+ return -1;
+ }
+
+ if (tdb_wipe_all(tdb) != 0) {
+ TDB_LOG((tdb, TDB_DEBUG_FATAL, __location__ " Failed to wipe database\n"));
+ tdb_transaction_cancel(tdb);
+ tdb_close(tmp_db);
+ return -1;
+ }
+
+ state.error = false;
+ state.dest_db = tdb;
+
+ if (tdb_traverse_read(tmp_db, repack_traverse, &state) == -1) {
+ TDB_LOG((tdb, TDB_DEBUG_FATAL, __location__ " Failed to traverse copying back\n"));
+ tdb_transaction_cancel(tdb);
+ tdb_close(tmp_db);
+ return -1;
+ }
+
+ if (state.error) {
+ TDB_LOG((tdb, TDB_DEBUG_FATAL, __location__ " Error during second traversal\n"));
+ tdb_transaction_cancel(tdb);
+ tdb_close(tmp_db);
+ return -1;
+ }
+
+ tdb_close(tmp_db);
+
+ if (tdb_transaction_commit(tdb) != 0) {
+ TDB_LOG((tdb, TDB_DEBUG_FATAL, __location__ " Failed to commit\n"));
+ return -1;
+ }
+
+ return 0;
+}
+
+/* Even on files, we can get partial writes due to signals. */
+bool tdb_write_all(int fd, const void *buf, size_t count)
+{
+ while (count) {
+ ssize_t ret;
+ ret = write(fd, buf, count);
+ if (ret < 0)
+ return false;
+ buf = (const char *)buf + ret;
+ count -= ret;
+ }
+ return true;
+}
+
+bool tdb_add_off_t(tdb_off_t a, tdb_off_t b, tdb_off_t *pret)
+{
+ tdb_off_t ret = a + b;
+
+ if ((ret < a) || (ret < b)) {
+ return false;
+ }
+ *pret = ret;
+ return true;
+}
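+
+/*
+ * A minimal sketch of the intended use, illustrative only; rec_ptr and
+ * rec are assumed to be in scope. Rejects offset arithmetic that would
+ * wrap, e.g. when a corrupt record length pushes an offset past
+ * UINT32_MAX.
+ */
+#if 0	/* illustrative only, never compiled */
+	tdb_off_t end;
+
+	if (!tdb_add_off_t(rec_ptr, rec.rec_len, &end)) {
+		/* overflow: treat the record as corrupt */
+	}
+#endif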
+
+#ifdef TDB_TRACE
+static void tdb_trace_write(struct tdb_context *tdb, const char *str)
+{
+ if (!tdb_write_all(tdb->tracefd, str, strlen(str))) {
+ close(tdb->tracefd);
+ tdb->tracefd = -1;
+ }
+}
+
+static void tdb_trace_start(struct tdb_context *tdb)
+{
+ tdb_off_t seqnum=0;
+ char msg[sizeof(tdb_off_t) * 4 + 1];
+
+ tdb_ofs_read(tdb, TDB_SEQNUM_OFS, &seqnum);
+ snprintf(msg, sizeof(msg), "%u ", seqnum);
+ tdb_trace_write(tdb, msg);
+}
+
+static void tdb_trace_end(struct tdb_context *tdb)
+{
+ tdb_trace_write(tdb, "\n");
+}
+
+static void tdb_trace_end_ret(struct tdb_context *tdb, int ret)
+{
+ char msg[sizeof(ret) * 4 + 4];
+ snprintf(msg, sizeof(msg), " = %i\n", ret);
+ tdb_trace_write(tdb, msg);
+}
+
+static void tdb_trace_record(struct tdb_context *tdb, TDB_DATA rec)
+{
+ char msg[20 + rec.dsize*2], *p;
+ unsigned int i;
+
+ /* We differentiate zero-length records from non-existent ones. */
+ if (rec.dptr == NULL) {
+ tdb_trace_write(tdb, " NULL");
+ return;
+ }
+
+ /* snprintf here is purely cargo-cult programming. */
+ p = msg;
+ p += snprintf(p, sizeof(msg), " %zu:", rec.dsize);
+ for (i = 0; i < rec.dsize; i++)
+ p += snprintf(p, 2, "%02x", rec.dptr[i]);
+
+ tdb_trace_write(tdb, msg);
+}
+
+void tdb_trace(struct tdb_context *tdb, const char *op)
+{
+ tdb_trace_start(tdb);
+ tdb_trace_write(tdb, op);
+ tdb_trace_end(tdb);
+}
+
+void tdb_trace_seqnum(struct tdb_context *tdb, uint32_t seqnum, const char *op)
+{
+ char msg[sizeof(tdb_off_t) * 4 + 1];
+
+ snprintf(msg, sizeof(msg), "%u ", seqnum);
+ tdb_trace_write(tdb, msg);
+ tdb_trace_write(tdb, op);
+ tdb_trace_end(tdb);
+}
+
+void tdb_trace_open(struct tdb_context *tdb, const char *op,
+ unsigned hash_size, unsigned tdb_flags, unsigned open_flags)
+{
+ char msg[128];
+
+ snprintf(msg, sizeof(msg),
+ "%s %u 0x%x 0x%x", op, hash_size, tdb_flags, open_flags);
+ tdb_trace_start(tdb);
+ tdb_trace_write(tdb, msg);
+ tdb_trace_end(tdb);
+}
+
+void tdb_trace_ret(struct tdb_context *tdb, const char *op, int ret)
+{
+ tdb_trace_start(tdb);
+ tdb_trace_write(tdb, op);
+ tdb_trace_end_ret(tdb, ret);
+}
+
+void tdb_trace_retrec(struct tdb_context *tdb, const char *op, TDB_DATA ret)
+{
+ tdb_trace_start(tdb);
+ tdb_trace_write(tdb, op);
+ tdb_trace_write(tdb, " =");
+ tdb_trace_record(tdb, ret);
+ tdb_trace_end(tdb);
+}
+
+void tdb_trace_1rec(struct tdb_context *tdb, const char *op,
+ TDB_DATA rec)
+{
+ tdb_trace_start(tdb);
+ tdb_trace_write(tdb, op);
+ tdb_trace_record(tdb, rec);
+ tdb_trace_end(tdb);
+}
+
+void tdb_trace_1rec_ret(struct tdb_context *tdb, const char *op,
+ TDB_DATA rec, int ret)
+{
+ tdb_trace_start(tdb);
+ tdb_trace_write(tdb, op);
+ tdb_trace_record(tdb, rec);
+ tdb_trace_end_ret(tdb, ret);
+}
+
+void tdb_trace_1rec_retrec(struct tdb_context *tdb, const char *op,
+ TDB_DATA rec, TDB_DATA ret)
+{
+ tdb_trace_start(tdb);
+ tdb_trace_write(tdb, op);
+ tdb_trace_record(tdb, rec);
+ tdb_trace_write(tdb, " =");
+ tdb_trace_record(tdb, ret);
+ tdb_trace_end(tdb);
+}
+
+void tdb_trace_2rec_flag_ret(struct tdb_context *tdb, const char *op,
+ TDB_DATA rec1, TDB_DATA rec2, unsigned flag,
+ int ret)
+{
+ char msg[1 + sizeof(ret) * 4];
+
+ snprintf(msg, sizeof(msg), " %#x", flag);
+ tdb_trace_start(tdb);
+ tdb_trace_write(tdb, op);
+ tdb_trace_record(tdb, rec1);
+ tdb_trace_record(tdb, rec2);
+ tdb_trace_write(tdb, msg);
+ tdb_trace_end_ret(tdb, ret);
+}
+
+void tdb_trace_2rec_retrec(struct tdb_context *tdb, const char *op,
+ TDB_DATA rec1, TDB_DATA rec2, TDB_DATA ret)
+{
+ tdb_trace_start(tdb);
+ tdb_trace_write(tdb, op);
+ tdb_trace_record(tdb, rec1);
+ tdb_trace_record(tdb, rec2);
+ tdb_trace_write(tdb, " =");
+ tdb_trace_record(tdb, ret);
+ tdb_trace_end(tdb);
+}
+#endif
diff --git a/common/tdb_private.h b/common/tdb_private.h
new file mode 100644
index 0000000..de8d9e6
--- /dev/null
+++ b/common/tdb_private.h
@@ -0,0 +1,327 @@
+#ifndef TDB_PRIVATE_H
+#define TDB_PRIVATE_H
+ /*
+ Unix SMB/CIFS implementation.
+
+ trivial database library - private includes
+
+ Copyright (C) Andrew Tridgell 2005
+
+ ** NOTE! The following LGPL license applies to the tdb
+ ** library. This does NOT imply that all of Samba is released
+ ** under the LGPL
+
+ This library is free software; you can redistribute it and/or
+ modify it under the terms of the GNU Lesser General Public
+ License as published by the Free Software Foundation; either
+ version 3 of the License, or (at your option) any later version.
+
+ This library is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ Lesser General Public License for more details.
+
+ You should have received a copy of the GNU Lesser General Public
+ License along with this library; if not, see <http://www.gnu.org/licenses/>.
+*/
+
+#include "replace.h"
+#include "system/filesys.h"
+#include "system/time.h"
+#include "system/shmem.h"
+#include "system/select.h"
+#include "system/wait.h"
+#include "tdb.h"
+
+/* #define TDB_TRACE 1 */
+#ifndef HAVE_GETPAGESIZE
+#define getpagesize() 0x2000
+#endif
+
+typedef uint32_t tdb_len_t;
+typedef uint32_t tdb_off_t;
+
+#ifndef offsetof
+#define offsetof(t,f) ((unsigned int)&((t *)0)->f)
+#endif
+
+#define TDB_MAGIC_FOOD "TDB file\n"
+#define TDB_VERSION (0x26011967 + 6)
+#define TDB_MAGIC (0x26011999U)
+#define TDB_FREE_MAGIC (~TDB_MAGIC)
+#define TDB_DEAD_MAGIC (0xFEE1DEAD)
+#define TDB_RECOVERY_MAGIC (0xf53bc0e7U)
+#define TDB_RECOVERY_INVALID_MAGIC (0x0)
+#define TDB_HASH_RWLOCK_MAGIC (0xbad1a51U)
+#define TDB_FEATURE_FLAG_MAGIC (0xbad1a52U)
+#define TDB_ALIGNMENT 4
+#define DEFAULT_HASH_SIZE 131
+#define FREELIST_TOP (sizeof(struct tdb_header))
+#define TDB_ALIGN(x,a) (((x) + (a)-1) & ~((a)-1))
+#define TDB_BYTEREV(x) (((((x)&0xff)<<24)|((x)&0xFF00)<<8)|(((x)>>8)&0xFF00)|((x)>>24))
+#define TDB_DEAD(r) ((r)->magic == TDB_DEAD_MAGIC)
+#define TDB_BAD_MAGIC(r) ((r)->magic != TDB_MAGIC && !TDB_DEAD(r))
+#define TDB_HASH_TOP(hash) (FREELIST_TOP + (BUCKET(hash)+1)*sizeof(tdb_off_t))
+#define TDB_HASHTABLE_SIZE(tdb) ((tdb->hash_size+1)*sizeof(tdb_off_t))
+#define TDB_DATA_START(hash_size) (TDB_HASH_TOP(hash_size-1) + sizeof(tdb_off_t))
+#define TDB_RECOVERY_HEAD offsetof(struct tdb_header, recovery_start)
+#define TDB_SEQNUM_OFS offsetof(struct tdb_header, sequence_number)
+#define TDB_PAD_BYTE 0x42
+#define TDB_PAD_U32 0x42424242
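+
+/*
+ * Worked example (derived from the macros above) of the file layout:
+ * the freelist pointer lives at FREELIST_TOP, followed by the
+ * hash_size bucket heads, so with a 4-byte tdb_off_t:
+ *   TDB_HASH_TOP(0)           == FREELIST_TOP + 4
+ *   TDB_DATA_START(hash_size) == FREELIST_TOP + (hash_size + 1) * 4
+ */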
+
+#define TDB_FEATURE_FLAG_MUTEX 0x00000001
+
+#define TDB_SUPPORTED_FEATURE_FLAGS ( \
+ TDB_FEATURE_FLAG_MUTEX | \
+ 0)
+
+/* NB: assumes there is a local variable called "tdb" that is the
+ * current context; also takes a doubly-parenthesized print-style
+ * argument. */
+#define TDB_LOG(x) tdb->log.log_fn x
+
+#ifdef TDB_TRACE
+void tdb_trace(struct tdb_context *tdb, const char *op);
+void tdb_trace_seqnum(struct tdb_context *tdb, uint32_t seqnum, const char *op);
+void tdb_trace_open(struct tdb_context *tdb, const char *op,
+ unsigned hash_size, unsigned tdb_flags, unsigned open_flags);
+void tdb_trace_ret(struct tdb_context *tdb, const char *op, int ret);
+void tdb_trace_retrec(struct tdb_context *tdb, const char *op, TDB_DATA ret);
+void tdb_trace_1rec(struct tdb_context *tdb, const char *op,
+ TDB_DATA rec);
+void tdb_trace_1rec_ret(struct tdb_context *tdb, const char *op,
+ TDB_DATA rec, int ret);
+void tdb_trace_1rec_retrec(struct tdb_context *tdb, const char *op,
+ TDB_DATA rec, TDB_DATA ret);
+void tdb_trace_2rec_flag_ret(struct tdb_context *tdb, const char *op,
+ TDB_DATA rec1, TDB_DATA rec2, unsigned flag,
+ int ret);
+void tdb_trace_2rec_retrec(struct tdb_context *tdb, const char *op,
+ TDB_DATA rec1, TDB_DATA rec2, TDB_DATA ret);
+#else
+#define tdb_trace(tdb, op)
+#define tdb_trace_seqnum(tdb, seqnum, op)
+#define tdb_trace_open(tdb, op, hash_size, tdb_flags, open_flags)
+#define tdb_trace_ret(tdb, op, ret)
+#define tdb_trace_retrec(tdb, op, ret)
+#define tdb_trace_1rec(tdb, op, rec)
+#define tdb_trace_1rec_ret(tdb, op, rec, ret)
+#define tdb_trace_1rec_retrec(tdb, op, rec, ret)
+#define tdb_trace_2rec_flag_ret(tdb, op, rec1, rec2, flag, ret)
+#define tdb_trace_2rec_retrec(tdb, op, rec1, rec2, ret)
+#endif /* !TDB_TRACE */
+
+/* lock offsets */
+#define OPEN_LOCK 0
+#define ACTIVE_LOCK 4
+#define TRANSACTION_LOCK 8
+
+/* free memory if the pointer is valid and zero the pointer */
+#ifndef SAFE_FREE
+#define SAFE_FREE(x) do { if ((x) != NULL) {free(x); (x)=NULL;} } while(0)
+#endif
+
+#define BUCKET(hash) ((hash) % tdb->hash_size)
+
+#define DOCONV() (tdb->flags & TDB_CONVERT)
+#define CONVERT(x) (DOCONV() ? tdb_convert(&x, sizeof(x)) : &x)
+
+
+/* the body of the database is made of one tdb_record for the free space
+ plus a separate data list for each hash value */
+struct tdb_record {
+ tdb_off_t next; /* offset of the next record in the list */
+ tdb_len_t rec_len; /* total byte length of record */
+ tdb_len_t key_len; /* byte length of key */
+ tdb_len_t data_len; /* byte length of data */
+ uint32_t full_hash; /* the full 32 bit hash of the key */
+ uint32_t magic; /* try to catch errors */
+ /* the following union is implied:
+ union {
+ char record[rec_len];
+ struct {
+ char key[key_len];
+ char data[data_len];
+ }
+ uint32_t totalsize; (tailer)
+ }
+ */
+};
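+
+/* So, as implied by the union above, a used record on disk looks like:
+
+	| next | rec_len | key_len | data_len | full_hash | magic |
+	| key bytes ... | data bytes ... | padding ... | tailer |
+
+   where rec_len covers everything after the fixed header, and the
+   4-byte tailer (the record's total size, header included) sits in
+   the last word of the allocation. */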
+
+
+/* this is stored at the front of every database */
+struct tdb_header {
+ char magic_food[32]; /* for /etc/magic */
+ uint32_t version; /* version of the code */
+ uint32_t hash_size; /* number of hash entries */
+ tdb_off_t rwlocks; /* obsolete - kept to detect old formats */
+ tdb_off_t recovery_start; /* offset of transaction recovery region */
+ tdb_off_t sequence_number; /* used when TDB_SEQNUM is set */
+ uint32_t magic1_hash; /* hash of TDB_MAGIC_FOOD. */
+ uint32_t magic2_hash; /* hash of TDB_MAGIC. */
+ uint32_t feature_flags;
+ tdb_len_t mutex_size; /* set if TDB_FEATURE_FLAG_MUTEX is set */
+ tdb_off_t reserved[25];
+};
+
+struct tdb_lock_type {
+ uint32_t off;
+ uint32_t count;
+ uint32_t ltype;
+};
+
+struct tdb_traverse_lock {
+ struct tdb_traverse_lock *next;
+ uint32_t off;
+ uint32_t hash;
+ int lock_rw;
+};
+
+enum tdb_lock_flags {
+ /* WAIT == F_SETLKW, NOWAIT == F_SETLK */
+ TDB_LOCK_NOWAIT = 0,
+ TDB_LOCK_WAIT = 1,
+ /* If set, don't log an error on failure. */
+ TDB_LOCK_PROBE = 2,
+ /* If set, don't actually lock at all. */
+ TDB_LOCK_MARK_ONLY = 4,
+};
+
+struct tdb_methods {
+ int (*tdb_read)(struct tdb_context *, tdb_off_t , void *, tdb_len_t , int );
+ int (*tdb_write)(struct tdb_context *, tdb_off_t, const void *, tdb_len_t);
+ void (*next_hash_chain)(struct tdb_context *, uint32_t *);
+ int (*tdb_oob)(struct tdb_context *, tdb_off_t , tdb_len_t, int );
+ int (*tdb_expand_file)(struct tdb_context *, tdb_off_t , tdb_off_t );
+};
+
+struct tdb_mutexes;
+
+struct tdb_context {
+ char *name; /* the name of the database */
+ void *map_ptr; /* where it is currently mapped */
+ int fd; /* open file descriptor for the database */
+ tdb_len_t map_size; /* how much space has been mapped */
+ int read_only; /* opened read-only */
+ int traverse_read; /* read-only traversal */
+ int traverse_write; /* read-write traversal */
+ struct tdb_lock_type allrecord_lock; /* .offset == upgradable */
+ int num_lockrecs;
+ struct tdb_lock_type *lockrecs; /* only real locks, all with count>0 */
+ int lockrecs_array_length;
+
+ tdb_off_t hdr_ofs; /* this is 0 or header.mutex_size */
+ struct tdb_mutexes *mutexes; /* mmap of the mutex area */
+
+ enum TDB_ERROR ecode; /* error code for last tdb error */
+ uint32_t hash_size;
+ uint32_t feature_flags;
+ uint32_t flags; /* the flags passed to tdb_open */
+ struct tdb_traverse_lock travlocks; /* current traversal locks */
+ struct tdb_context *next; /* all tdbs to avoid multiple opens */
+ dev_t device; /* uniquely identifies this tdb */
+ ino_t inode; /* uniquely identifies this tdb */
+ struct tdb_logging_context log;
+ unsigned int (*hash_fn)(TDB_DATA *key);
+ int open_flags; /* flags used in the open - needed by reopen */
+ const struct tdb_methods *methods;
+ struct tdb_transaction *transaction;
+ int page_size;
+ int max_dead_records;
+#ifdef TDB_TRACE
+ int tracefd;
+#endif
+ volatile sig_atomic_t *interrupt_sig_ptr;
+};
+
+
+/*
+ internal prototypes
+*/
+int tdb_munmap(struct tdb_context *tdb);
+int tdb_mmap(struct tdb_context *tdb);
+int tdb_lock(struct tdb_context *tdb, int list, int ltype);
+int tdb_lock_nonblock(struct tdb_context *tdb, int list, int ltype);
+int tdb_nest_lock(struct tdb_context *tdb, uint32_t offset, int ltype,
+ enum tdb_lock_flags flags);
+int tdb_nest_unlock(struct tdb_context *tdb, uint32_t offset, int ltype,
+ bool mark_lock);
+int tdb_unlock(struct tdb_context *tdb, int list, int ltype);
+int tdb_brlock(struct tdb_context *tdb,
+ int rw_type, tdb_off_t offset, size_t len,
+ enum tdb_lock_flags flags);
+int tdb_brunlock(struct tdb_context *tdb,
+ int rw_type, tdb_off_t offset, size_t len);
+bool tdb_have_extra_locks(struct tdb_context *tdb);
+void tdb_release_transaction_locks(struct tdb_context *tdb);
+int tdb_transaction_lock(struct tdb_context *tdb, int ltype,
+ enum tdb_lock_flags lockflags);
+int tdb_transaction_unlock(struct tdb_context *tdb, int ltype);
+int tdb_recovery_area(struct tdb_context *tdb,
+ const struct tdb_methods *methods,
+ tdb_off_t *recovery_offset,
+ struct tdb_record *rec);
+int tdb_allrecord_lock(struct tdb_context *tdb, int ltype,
+ enum tdb_lock_flags flags, bool upgradable);
+int tdb_allrecord_unlock(struct tdb_context *tdb, int ltype, bool mark_lock);
+int tdb_allrecord_upgrade(struct tdb_context *tdb);
+int tdb_write_lock_record(struct tdb_context *tdb, tdb_off_t off);
+int tdb_write_unlock_record(struct tdb_context *tdb, tdb_off_t off);
+int tdb_ofs_read(struct tdb_context *tdb, tdb_off_t offset, tdb_off_t *d);
+int tdb_ofs_write(struct tdb_context *tdb, tdb_off_t offset, tdb_off_t *d);
+void *tdb_convert(void *buf, uint32_t size);
+int tdb_free(struct tdb_context *tdb, tdb_off_t offset, struct tdb_record *rec);
+tdb_off_t tdb_allocate(struct tdb_context *tdb, int hash, tdb_len_t length,
+ struct tdb_record *rec);
+int tdb_ofs_read(struct tdb_context *tdb, tdb_off_t offset, tdb_off_t *d);
+int tdb_ofs_write(struct tdb_context *tdb, tdb_off_t offset, tdb_off_t *d);
+int tdb_lock_record(struct tdb_context *tdb, tdb_off_t off);
+int tdb_unlock_record(struct tdb_context *tdb, tdb_off_t off);
+bool tdb_needs_recovery(struct tdb_context *tdb);
+int tdb_rec_read(struct tdb_context *tdb, tdb_off_t offset, struct tdb_record *rec);
+int tdb_rec_write(struct tdb_context *tdb, tdb_off_t offset, struct tdb_record *rec);
+int tdb_do_delete(struct tdb_context *tdb, tdb_off_t rec_ptr, struct tdb_record *rec);
+unsigned char *tdb_alloc_read(struct tdb_context *tdb, tdb_off_t offset, tdb_len_t len);
+int tdb_parse_data(struct tdb_context *tdb, TDB_DATA key,
+ tdb_off_t offset, tdb_len_t len,
+ int (*parser)(TDB_DATA key, TDB_DATA data,
+ void *private_data),
+ void *private_data);
+tdb_off_t tdb_find_lock_hash(struct tdb_context *tdb, TDB_DATA key, uint32_t hash, int locktype,
+ struct tdb_record *rec);
+tdb_off_t tdb_find_dead(struct tdb_context *tdb, uint32_t hash,
+ struct tdb_record *r, tdb_len_t length,
+ tdb_off_t *p_last_ptr);
+int tdb_purge_dead(struct tdb_context *tdb, uint32_t hash);
+void tdb_io_init(struct tdb_context *tdb);
+int tdb_expand(struct tdb_context *tdb, tdb_off_t size);
+tdb_off_t tdb_expand_adjust(tdb_off_t map_size, tdb_off_t size, int page_size);
+int tdb_rec_free_read(struct tdb_context *tdb, tdb_off_t off,
+ struct tdb_record *rec);
+bool tdb_write_all(int fd, const void *buf, size_t count);
+int tdb_transaction_recover(struct tdb_context *tdb);
+void tdb_header_hash(struct tdb_context *tdb,
+ uint32_t *magic1_hash, uint32_t *magic2_hash);
+unsigned int tdb_old_hash(TDB_DATA *key);
+size_t tdb_dead_space(struct tdb_context *tdb, tdb_off_t off);
+bool tdb_add_off_t(tdb_off_t a, tdb_off_t b, tdb_off_t *pret);
+
+/* tdb_off_t and tdb_len_t right now are both uint32_t */
+#define tdb_add_len_t tdb_add_off_t
+
+size_t tdb_mutex_size(struct tdb_context *tdb);
+bool tdb_have_mutexes(struct tdb_context *tdb);
+int tdb_mutex_init(struct tdb_context *tdb);
+int tdb_mutex_mmap(struct tdb_context *tdb);
+int tdb_mutex_munmap(struct tdb_context *tdb);
+bool tdb_mutex_lock(struct tdb_context *tdb, int rw, off_t off, off_t len,
+ bool waitflag, int *pret);
+bool tdb_mutex_unlock(struct tdb_context *tdb, int rw, off_t off, off_t len,
+ int *pret);
+int tdb_mutex_allrecord_lock(struct tdb_context *tdb, int ltype,
+ enum tdb_lock_flags flags);
+int tdb_mutex_allrecord_unlock(struct tdb_context *tdb);
+int tdb_mutex_allrecord_upgrade(struct tdb_context *tdb);
+void tdb_mutex_allrecord_downgrade(struct tdb_context *tdb);
+
+#endif /* TDB_PRIVATE_H */
diff --git a/common/transaction.c b/common/transaction.c
new file mode 100644
index 0000000..0dd057b
--- /dev/null
+++ b/common/transaction.c
@@ -0,0 +1,1312 @@
+ /*
+ Unix SMB/CIFS implementation.
+
+ trivial database library
+
+ Copyright (C) Andrew Tridgell 2005
+
+ ** NOTE! The following LGPL license applies to the tdb
+ ** library. This does NOT imply that all of Samba is released
+ ** under the LGPL
+
+ This library is free software; you can redistribute it and/or
+ modify it under the terms of the GNU Lesser General Public
+ License as published by the Free Software Foundation; either
+ version 3 of the License, or (at your option) any later version.
+
+ This library is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ Lesser General Public License for more details.
+
+ You should have received a copy of the GNU Lesser General Public
+ License along with this library; if not, see <http://www.gnu.org/licenses/>.
+*/
+
+#include "tdb_private.h"
+
+/*
+ transaction design:
+
+ - only allow a single transaction at a time per database. This makes
+ using the transaction API simpler, as otherwise the caller would
+ have to cope with temporary failures in transactions that conflict
+ with other current transactions
+
+ - keep the transaction recovery information in the same file as the
+ database, using a special 'transaction recovery' record pointed at
+ by the header. This removes the need for extra journal files as
+ used by some other databases
+
+ - dynamically allocate the transaction recovery record, re-using it
+ for subsequent transactions. If a larger record is needed then
+ tdb_free() the old record to place it on the normal tdb freelist
+ before allocating the new record
+
+ - during transactions, keep a linked list of all writes that have
+ been performed by intercepting all tdb_write() calls. The hooked
+ transaction versions of tdb_read() and tdb_write() check this
+ linked list and try to use the elements of the list in preference
+ to the real database.
+
+ - don't allow any locks to be held when a transaction starts,
+ otherwise we can end up with deadlock (plus the lack of lock
+ nesting in posix locks would mean the lock is lost)
+
+ - if the caller gains a lock during the transaction but doesn't
+ release it then fail the commit
+
+ - allow for nested calls to tdb_transaction_start(), re-using the
+ existing transaction record. If the inner transaction is cancelled
+ then a subsequent commit will fail
+
+ - keep a mirrored copy of the tdb hash chain heads to allow for the
+ fast hash heads scan on traverse, updating the mirrored copy in
+ the transaction version of tdb_write
+
+ - allow callers to mix transaction and non-transaction use of tdb,
+ although once a transaction is started then an exclusive lock is
+ gained until the transaction is committed or cancelled
+
+ - the commit strategy involves first saving away all modified data
+ into a linearised buffer in the transaction recovery area, then
+ marking the transaction recovery area with a magic value to
+ indicate a valid recovery record. In total 4 fsync/msync calls are
+ needed per commit to prevent race conditions. It might be possible
+ to reduce this to 3 or even 2 with some more work.
+
+ - check for a valid recovery record on open of the tdb, while the
+ open lock is held. Automatically recover from the transaction
+ recovery area if needed, then continue with the open as
+ usual. This allows for smooth crash recovery with no administrator
+ intervention.
+
+ - if TDB_NOSYNC is passed to flags in tdb_open then transactions are
+ still available, but no fsync/msync calls are made. This means we
+ are still proof against a process dying during transaction commit,
+ but not against machine reboot.
+
+ - if TDB_ALLOW_NESTING is passed to flags in tdb_open, or added using
+ tdb_add_flags(), transaction nesting is enabled.
+ It resets the TDB_DISALLOW_NESTING flag, as the two cannot be used together.
+ The default is that transaction nesting is allowed.
+ Note: this default may change in future versions of tdb.
+
+ Beware: when transactions are nested, a transaction successfully
+ completed with tdb_transaction_commit() can be silently unrolled later.
+
+ - if TDB_DISALLOW_NESTING is passed to flags in tdb_open, or added using
+ tdb_add_flags(), transaction nesting is disabled.
+ It resets the TDB_ALLOW_NESTING flag, as the two cannot be used together.
+ An attempt to create a nested transaction will fail with TDB_ERR_NESTING.
+ The default is that transaction nesting is allowed.
+ Note: this default may change in future versions of tdb.
+*/
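+
+/*
+  A minimal usage sketch of the API implemented below. "key" and
+  "data" stand for caller-supplied TDB_DATA values; tdb_store() is
+  the public store call from tdb.h:
+
+	if (tdb_transaction_start(tdb) != 0) {
+		return -1;
+	}
+	if (tdb_store(tdb, key, data, TDB_REPLACE) != 0) {
+		tdb_transaction_cancel(tdb);
+		return -1;
+	}
+	if (tdb_transaction_commit(tdb) != 0) {
+		return -1;
+	}
+
+  All writes between start and commit become visible atomically; a
+  crash before the commit is complete is rolled back from the
+  recovery area on the next open.
+*/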
+
+
+/*
+ hold the context of any current transaction
+*/
+struct tdb_transaction {
+ /* we keep a mirrored copy of the tdb hash heads here so
+ tdb_next_hash_chain() can operate efficiently */
+ uint32_t *hash_heads;
+
+ /* the original io methods - used to do IOs to the real db */
+ const struct tdb_methods *io_methods;
+
+ /* the list of transaction blocks. When a block is first
+ written to, it gets created in this list */
+ uint8_t **blocks;
+ uint32_t num_blocks;
+ uint32_t block_size; /* bytes in each block */
+ uint32_t last_block_size; /* number of valid bytes in the last block */
+
+ /* non-zero when an internal transaction error has
+ occurred. All write operations will then fail until the
+ transaction is ended */
+ int transaction_error;
+
+ /* when inside a transaction we need to keep track of any
+ nested tdb_transaction_start() calls, as these are allowed,
+ but don't create a new transaction */
+ int nesting;
+
+ /* set when a prepare has already occurred */
+ bool prepared;
+ tdb_off_t magic_offset;
+
+ /* old file size before transaction */
+ tdb_len_t old_map_size;
+
+ /* did we expand in this transaction */
+ bool expanded;
+};
+
+
+/*
+ read while in a transaction. We need to check first if the data is in our list
+ of transaction elements, then if not do a real read
+*/
+static int transaction_read(struct tdb_context *tdb, tdb_off_t off, void *buf,
+ tdb_len_t len, int cv)
+{
+ uint32_t blk;
+
+ /* break it down into block sized ops */
+ while (len + (off % tdb->transaction->block_size) > tdb->transaction->block_size) {
+ tdb_len_t len2 = tdb->transaction->block_size - (off % tdb->transaction->block_size);
+ if (transaction_read(tdb, off, buf, len2, cv) != 0) {
+ return -1;
+ }
+ len -= len2;
+ off += len2;
+ buf = (void *)(len2 + (char *)buf);
+ }
+
+ if (len == 0) {
+ return 0;
+ }
+
+ blk = off / tdb->transaction->block_size;
+
+ /* see if we have it in the block list */
+ if (tdb->transaction->num_blocks <= blk ||
+ tdb->transaction->blocks[blk] == NULL) {
+ /* nope, do a real read */
+ if (tdb->transaction->io_methods->tdb_read(tdb, off, buf, len, cv) != 0) {
+ goto fail;
+ }
+ return 0;
+ }
+
+ /* it is in the block list. Now check for the last block */
+ if (blk == tdb->transaction->num_blocks-1) {
+ if (len > tdb->transaction->last_block_size) {
+ goto fail;
+ }
+ }
+
+ /* now copy it out of this block */
+ memcpy(buf, tdb->transaction->blocks[blk] + (off % tdb->transaction->block_size), len);
+ if (cv) {
+ tdb_convert(buf, len);
+ }
+ return 0;
+
+fail:
+ TDB_LOG((tdb, TDB_DEBUG_FATAL, "transaction_read: failed at off=%u len=%u\n", off, len));
+ tdb->ecode = TDB_ERR_IO;
+ tdb->transaction->transaction_error = 1;
+ return -1;
+}
+
+
+/*
+ write while in a transaction
+*/
+static int transaction_write(struct tdb_context *tdb, tdb_off_t off,
+ const void *buf, tdb_len_t len)
+{
+ uint32_t blk;
+
+ /* Only a commit is allowed on a prepared transaction */
+ if (tdb->transaction->prepared) {
+ tdb->ecode = TDB_ERR_EINVAL;
+ TDB_LOG((tdb, TDB_DEBUG_FATAL, "transaction_write: transaction already prepared, write not allowed\n"));
+ tdb->transaction->transaction_error = 1;
+ return -1;
+ }
+
+ /* if the write is to a hash head, then update the transaction
+ hash heads */
+ if (len == sizeof(tdb_off_t) && off >= FREELIST_TOP &&
+ off < FREELIST_TOP+TDB_HASHTABLE_SIZE(tdb)) {
+ uint32_t chain = (off-FREELIST_TOP) / sizeof(tdb_off_t);
+ memcpy(&tdb->transaction->hash_heads[chain], buf, len);
+ }
+
+ /* break it up into block sized chunks */
+ while (len + (off % tdb->transaction->block_size) > tdb->transaction->block_size) {
+ tdb_len_t len2 = tdb->transaction->block_size - (off % tdb->transaction->block_size);
+ if (transaction_write(tdb, off, buf, len2) != 0) {
+ return -1;
+ }
+ len -= len2;
+ off += len2;
+ if (buf != NULL) {
+ buf = (const void *)(len2 + (const char *)buf);
+ }
+ }
+
+ if (len == 0) {
+ return 0;
+ }
+
+ blk = off / tdb->transaction->block_size;
+ off = off % tdb->transaction->block_size;
+
+ if (tdb->transaction->num_blocks <= blk) {
+ uint8_t **new_blocks;
+ /* expand the blocks array */
+ new_blocks = (uint8_t **)realloc(tdb->transaction->blocks,
+ (blk+1)*sizeof(uint8_t *));
+ if (new_blocks == NULL) {
+ tdb->ecode = TDB_ERR_OOM;
+ goto fail;
+ }
+ memset(&new_blocks[tdb->transaction->num_blocks], 0,
+ (1+(blk - tdb->transaction->num_blocks))*sizeof(uint8_t *));
+ tdb->transaction->blocks = new_blocks;
+ tdb->transaction->num_blocks = blk+1;
+ tdb->transaction->last_block_size = 0;
+ }
+
+ /* allocate and fill a block? */
+ if (tdb->transaction->blocks[blk] == NULL) {
+ tdb->transaction->blocks[blk] = (uint8_t *)calloc(tdb->transaction->block_size, 1);
+ if (tdb->transaction->blocks[blk] == NULL) {
+ tdb->ecode = TDB_ERR_OOM;
+ tdb->transaction->transaction_error = 1;
+ return -1;
+ }
+ if (tdb->transaction->old_map_size > blk * tdb->transaction->block_size) {
+ tdb_len_t len2 = tdb->transaction->block_size;
+ if (len2 + (blk * tdb->transaction->block_size) > tdb->transaction->old_map_size) {
+ len2 = tdb->transaction->old_map_size - (blk * tdb->transaction->block_size);
+ }
+ if (tdb->transaction->io_methods->tdb_read(tdb, blk * tdb->transaction->block_size,
+ tdb->transaction->blocks[blk],
+ len2, 0) != 0) {
+ SAFE_FREE(tdb->transaction->blocks[blk]);
+ tdb->ecode = TDB_ERR_IO;
+ goto fail;
+ }
+ if (blk == tdb->transaction->num_blocks-1) {
+ tdb->transaction->last_block_size = len2;
+ }
+ }
+ }
+
+ /* overwrite part of an existing block */
+ if (buf == NULL) {
+ memset(tdb->transaction->blocks[blk] + off, 0, len);
+ } else {
+ memcpy(tdb->transaction->blocks[blk] + off, buf, len);
+ }
+ if (blk == tdb->transaction->num_blocks-1) {
+ if (len + off > tdb->transaction->last_block_size) {
+ tdb->transaction->last_block_size = len + off;
+ }
+ }
+
+ return 0;
+
+fail:
+ TDB_LOG((tdb, TDB_DEBUG_FATAL, "transaction_write: failed at off=%u len=%u\n",
+ (blk*tdb->transaction->block_size) + off, len));
+ tdb->transaction->transaction_error = 1;
+ return -1;
+}
+
+
+/*
+ write while in a transaction - this variant never expands the transaction blocks; it only
+ updates existing blocks. This means it cannot change the recovery size
+*/
+static int transaction_write_existing(struct tdb_context *tdb, tdb_off_t off,
+ const void *buf, tdb_len_t len)
+{
+ uint32_t blk;
+
+ /* break it up into block sized chunks */
+ while (len + (off % tdb->transaction->block_size) > tdb->transaction->block_size) {
+ tdb_len_t len2 = tdb->transaction->block_size - (off % tdb->transaction->block_size);
+ if (transaction_write_existing(tdb, off, buf, len2) != 0) {
+ return -1;
+ }
+ len -= len2;
+ off += len2;
+ if (buf != NULL) {
+ buf = (const void *)(len2 + (const char *)buf);
+ }
+ }
+
+ if (len == 0) {
+ return 0;
+ }
+
+ blk = off / tdb->transaction->block_size;
+ off = off % tdb->transaction->block_size;
+
+ if (tdb->transaction->num_blocks <= blk ||
+ tdb->transaction->blocks[blk] == NULL) {
+ return 0;
+ }
+
+ if (blk == tdb->transaction->num_blocks-1 &&
+ off + len > tdb->transaction->last_block_size) {
+ if (off >= tdb->transaction->last_block_size) {
+ return 0;
+ }
+ len = tdb->transaction->last_block_size - off;
+ }
+
+ /* overwrite part of an existing block */
+ memcpy(tdb->transaction->blocks[blk] + off, buf, len);
+
+ return 0;
+}
+
+
+/*
+ accelerated hash chain head search, using the cached hash heads
+*/
+static void transaction_next_hash_chain(struct tdb_context *tdb, uint32_t *chain)
+{
+ uint32_t h = *chain;
+ for (;h < tdb->hash_size;h++) {
+ /* the +1 takes account of the freelist */
+ if (0 != tdb->transaction->hash_heads[h+1]) {
+ break;
+ }
+ }
+ (*chain) = h;
+}
+
+/*
+ out of bounds check during a transaction
+*/
+static int transaction_oob(struct tdb_context *tdb, tdb_off_t off,
+ tdb_len_t len, int probe)
+{
+ if (off + len >= off && off + len <= tdb->map_size) {
+ return 0;
+ }
+ tdb->ecode = TDB_ERR_IO;
+ return -1;
+}
+
+/*
+ transaction version of tdb_expand().
+*/
+static int transaction_expand_file(struct tdb_context *tdb, tdb_off_t size,
+ tdb_off_t addition)
+{
+ /* add a write to the transaction elements, so subsequent
+ reads see the zero data */
+ if (transaction_write(tdb, size, NULL, addition) != 0) {
+ return -1;
+ }
+
+ tdb->transaction->expanded = true;
+
+ return 0;
+}
+
+static const struct tdb_methods transaction_methods = {
+ transaction_read,
+ transaction_write,
+ transaction_next_hash_chain,
+ transaction_oob,
+ transaction_expand_file,
+};
+
+
+/*
+ start a tdb transaction. No token is returned, as only a single
+ transaction is allowed to be pending per tdb_context
+*/
+static int _tdb_transaction_start(struct tdb_context *tdb,
+ enum tdb_lock_flags lockflags)
+{
+ /* some sanity checks */
+ if (tdb->read_only || (tdb->flags & TDB_INTERNAL)
+ || tdb->traverse_read) {
+ TDB_LOG((tdb, TDB_DEBUG_ERROR, "tdb_transaction_start: cannot start a transaction on a read-only or internal db\n"));
+ tdb->ecode = TDB_ERR_EINVAL;
+ return -1;
+ }
+
+ /* cope with nested tdb_transaction_start() calls */
+ if (tdb->transaction != NULL) {
+ if (!(tdb->flags & TDB_ALLOW_NESTING)) {
+ tdb->ecode = TDB_ERR_NESTING;
+ return -1;
+ }
+ tdb->transaction->nesting++;
+ TDB_LOG((tdb, TDB_DEBUG_TRACE, "tdb_transaction_start: nesting %d\n",
+ tdb->transaction->nesting));
+ return 0;
+ }
+
+ if (tdb_have_extra_locks(tdb)) {
+ /* the caller must not have any locks when starting a
+ transaction as otherwise we'll be screwed by lack
+ of nested locks in posix */
+ TDB_LOG((tdb, TDB_DEBUG_ERROR, "tdb_transaction_start: cannot start a transaction with locks held\n"));
+ tdb->ecode = TDB_ERR_LOCK;
+ return -1;
+ }
+
+ if (tdb->travlocks.next != NULL) {
+ /* you cannot use transactions inside a traverse (although you can use
+ traverse inside a transaction) as otherwise you can end up with
+ deadlock */
+ TDB_LOG((tdb, TDB_DEBUG_ERROR, "tdb_transaction_start: cannot start a transaction within a traverse\n"));
+ tdb->ecode = TDB_ERR_LOCK;
+ return -1;
+ }
+
+ tdb->transaction = (struct tdb_transaction *)
+ calloc(sizeof(struct tdb_transaction), 1);
+ if (tdb->transaction == NULL) {
+ tdb->ecode = TDB_ERR_OOM;
+ return -1;
+ }
+
+ /* a page at a time seems like a reasonable compromise between compactness and efficiency */
+ tdb->transaction->block_size = tdb->page_size;
+
+ /* get the transaction write lock. This is a blocking lock. As
+ discussed with Volker, there are a number of ways we could
+ make this async, which we will probably do in the future */
+ if (tdb_transaction_lock(tdb, F_WRLCK, lockflags) == -1) {
+ SAFE_FREE(tdb->transaction->blocks);
+ SAFE_FREE(tdb->transaction);
+ if ((lockflags & TDB_LOCK_WAIT) == 0) {
+ tdb->ecode = TDB_ERR_NOLOCK;
+ }
+ return -1;
+ }
+
+ /* get a read lock from the freelist to the end of file. This
+ is upgraded to a write lock during the commit */
+ if (tdb_allrecord_lock(tdb, F_RDLCK, TDB_LOCK_WAIT, true) == -1) {
+ TDB_LOG((tdb, TDB_DEBUG_ERROR, "tdb_transaction_start: failed to get hash locks\n"));
+ goto fail_allrecord_lock;
+ }
+
+ /* setup a copy of the hash table heads so the hash scan in
+ traverse can be fast */
+ tdb->transaction->hash_heads = (uint32_t *)
+ calloc(tdb->hash_size+1, sizeof(uint32_t));
+ if (tdb->transaction->hash_heads == NULL) {
+ tdb->ecode = TDB_ERR_OOM;
+ goto fail;
+ }
+ if (tdb->methods->tdb_read(tdb, FREELIST_TOP, tdb->transaction->hash_heads,
+ TDB_HASHTABLE_SIZE(tdb), 0) != 0) {
+ TDB_LOG((tdb, TDB_DEBUG_FATAL, "tdb_transaction_start: failed to read hash heads\n"));
+ tdb->ecode = TDB_ERR_IO;
+ goto fail;
+ }
+
+ /* make sure we know about any file expansions already done by
+ anyone else */
+ tdb->methods->tdb_oob(tdb, tdb->map_size, 1, 1);
+ tdb->transaction->old_map_size = tdb->map_size;
+
+ /* finally hook the io methods, replacing them with
+ transaction specific methods */
+ tdb->transaction->io_methods = tdb->methods;
+ tdb->methods = &transaction_methods;
+
+ /* Trace at the end, so we get sequence number correct. */
+ tdb_trace(tdb, "tdb_transaction_start");
+ return 0;
+
+fail:
+ tdb_allrecord_unlock(tdb, F_RDLCK, false);
+fail_allrecord_lock:
+ tdb_transaction_unlock(tdb, F_WRLCK);
+ SAFE_FREE(tdb->transaction->blocks);
+ SAFE_FREE(tdb->transaction->hash_heads);
+ SAFE_FREE(tdb->transaction);
+ return -1;
+}
+
+_PUBLIC_ int tdb_transaction_start(struct tdb_context *tdb)
+{
+ return _tdb_transaction_start(tdb, TDB_LOCK_WAIT);
+}
+
+_PUBLIC_ int tdb_transaction_start_nonblock(struct tdb_context *tdb)
+{
+ return _tdb_transaction_start(tdb, TDB_LOCK_NOWAIT|TDB_LOCK_PROBE);
+}
+
+/*
+ sync to disk
+*/
+static int transaction_sync(struct tdb_context *tdb, tdb_off_t offset, tdb_len_t length)
+{
+ if (tdb->flags & TDB_NOSYNC) {
+ return 0;
+ }
+
+#ifdef HAVE_FDATASYNC
+ if (fdatasync(tdb->fd) != 0) {
+#else
+ if (fsync(tdb->fd) != 0) {
+#endif
+ tdb->ecode = TDB_ERR_IO;
+ TDB_LOG((tdb, TDB_DEBUG_FATAL, "tdb_transaction: fsync failed\n"));
+ return -1;
+ }
+#ifdef HAVE_MMAP
+ if (tdb->map_ptr) {
+ tdb_off_t moffset = offset & ~(tdb->page_size-1);
+ if (msync(moffset + (char *)tdb->map_ptr,
+ length + (offset - moffset), MS_SYNC) != 0) {
+ tdb->ecode = TDB_ERR_IO;
+ TDB_LOG((tdb, TDB_DEBUG_FATAL, "tdb_transaction: msync failed - %s\n",
+ strerror(errno)));
+ return -1;
+ }
+ }
+#endif
+ return 0;
+}
+
+
+static int _tdb_transaction_cancel(struct tdb_context *tdb)
+{
+ int i, ret = 0;
+
+ if (tdb->transaction == NULL) {
+ TDB_LOG((tdb, TDB_DEBUG_ERROR, "tdb_transaction_cancel: no transaction\n"));
+ return -1;
+ }
+
+ if (tdb->transaction->nesting != 0) {
+ tdb->transaction->transaction_error = 1;
+ tdb->transaction->nesting--;
+ return 0;
+ }
+
+ tdb->map_size = tdb->transaction->old_map_size;
+
+ /* free all the transaction blocks */
+ for (i=0;i<tdb->transaction->num_blocks;i++) {
+ if (tdb->transaction->blocks[i] != NULL) {
+ free(tdb->transaction->blocks[i]);
+ }
+ }
+ SAFE_FREE(tdb->transaction->blocks);
+
+ if (tdb->transaction->magic_offset) {
+ const struct tdb_methods *methods = tdb->transaction->io_methods;
+ const uint32_t invalid = TDB_RECOVERY_INVALID_MAGIC;
+
+ /* remove the recovery marker */
+ if (methods->tdb_write(tdb, tdb->transaction->magic_offset, &invalid, 4) == -1 ||
+ transaction_sync(tdb, tdb->transaction->magic_offset, 4) == -1) {
+ TDB_LOG((tdb, TDB_DEBUG_FATAL, "tdb_transaction_cancel: failed to remove recovery magic\n"));
+ ret = -1;
+ }
+ }
+
+ /* This also removes the OPEN_LOCK, if we have it. */
+ tdb_release_transaction_locks(tdb);
+
+ /* restore the normal io methods */
+ tdb->methods = tdb->transaction->io_methods;
+
+ SAFE_FREE(tdb->transaction->hash_heads);
+ SAFE_FREE(tdb->transaction);
+
+ return ret;
+}
+
+/*
+ cancel the current transaction
+*/
+_PUBLIC_ int tdb_transaction_cancel(struct tdb_context *tdb)
+{
+ tdb_trace(tdb, "tdb_transaction_cancel");
+ return _tdb_transaction_cancel(tdb);
+}
+
+/*
+ work out how much space the linearised recovery data will consume
+*/
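+/*
+  i.e. recovery_size = sizeof(uint32_t)              (for the tailer)
+	+ for each dirty block still inside the old file size:
+		2*sizeof(tdb_off_t) + that block's byte count
+
+  matching the (offset, length, data) entries plus tailer that
+  transaction_setup_recovery() writes below.
+*/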
+static bool tdb_recovery_size(struct tdb_context *tdb, tdb_len_t *result)
+{
+ tdb_len_t recovery_size = 0;
+ int i;
+
+ recovery_size = sizeof(uint32_t);
+ for (i=0;i<tdb->transaction->num_blocks;i++) {
+ tdb_len_t block_size;
+ if (i * tdb->transaction->block_size >= tdb->transaction->old_map_size) {
+ break;
+ }
+ if (tdb->transaction->blocks[i] == NULL) {
+ continue;
+ }
+ if (!tdb_add_len_t(recovery_size, 2*sizeof(tdb_off_t),
+ &recovery_size)) {
+ return false;
+ }
+ if (i == tdb->transaction->num_blocks-1) {
+ block_size = tdb->transaction->last_block_size;
+ } else {
+ block_size = tdb->transaction->block_size;
+ }
+ if (!tdb_add_len_t(recovery_size, block_size,
+ &recovery_size)) {
+ return false;
+ }
+ }
+
+ *result = recovery_size;
+ return true;
+}
+
+int tdb_recovery_area(struct tdb_context *tdb,
+ const struct tdb_methods *methods,
+ tdb_off_t *recovery_offset,
+ struct tdb_record *rec)
+{
+ if (tdb_ofs_read(tdb, TDB_RECOVERY_HEAD, recovery_offset) == -1) {
+ return -1;
+ }
+
+ if (*recovery_offset == 0) {
+ rec->rec_len = 0;
+ return 0;
+ }
+
+ if (methods->tdb_read(tdb, *recovery_offset, rec, sizeof(*rec),
+ DOCONV()) == -1) {
+ return -1;
+ }
+
+ /* ignore invalid recovery regions: can happen in crash */
+ if (rec->magic != TDB_RECOVERY_MAGIC &&
+ rec->magic != TDB_RECOVERY_INVALID_MAGIC) {
+ *recovery_offset = 0;
+ rec->rec_len = 0;
+ }
+ return 0;
+}
+
+/*
+ allocate the recovery area, or use an existing recovery area if it is
+ large enough
+*/
+static int tdb_recovery_allocate(struct tdb_context *tdb,
+ tdb_len_t *recovery_size,
+ tdb_off_t *recovery_offset,
+ tdb_len_t *recovery_max_size)
+{
+ struct tdb_record rec;
+ const struct tdb_methods *methods = tdb->transaction->io_methods;
+ tdb_off_t recovery_head, new_end;
+
+ if (tdb_recovery_area(tdb, methods, &recovery_head, &rec) == -1) {
+ TDB_LOG((tdb, TDB_DEBUG_FATAL, "tdb_recovery_allocate: failed to read recovery head\n"));
+ return -1;
+ }
+
+ if (!tdb_recovery_size(tdb, recovery_size)) {
+ TDB_LOG((tdb, TDB_DEBUG_FATAL, "tdb_recovery_allocate: "
+ "overflow recovery size\n"));
+ return -1;
+ }
+
+ /* Existing recovery area? */
+ if (recovery_head != 0 && *recovery_size <= rec.rec_len) {
+ /* it fits in the existing area */
+ *recovery_max_size = rec.rec_len;
+ *recovery_offset = recovery_head;
+ return 0;
+ }
+
+ /* If recovery area in middle of file, we need a new one. */
+ if (recovery_head == 0
+ || recovery_head + sizeof(rec) + rec.rec_len != tdb->map_size) {
+ /* we need to free up the old recovery area, then allocate a
+ new one at the end of the file. Note that we cannot use
+ tdb_allocate() to allocate the new one as that might return
+ us an area that is being currently used (as of the start of
+ the transaction) */
+ if (recovery_head) {
+ if (tdb_free(tdb, recovery_head, &rec) == -1) {
+ TDB_LOG((tdb, TDB_DEBUG_FATAL,
+ "tdb_recovery_allocate: failed to"
+ " free previous recovery area\n"));
+ return -1;
+ }
+
+ /* the tdb_free() call might have increased
+ * the recovery size */
+ if (!tdb_recovery_size(tdb, recovery_size)) {
+ TDB_LOG((tdb, TDB_DEBUG_FATAL,
+ "tdb_recovery_allocate: "
+ "overflow recovery size\n"));
+ return -1;
+ }
+ }
+
+ /* New head will be at end of file. */
+ recovery_head = tdb->map_size;
+ }
+
+ /* Now we know where it will be. */
+ *recovery_offset = recovery_head;
+
+ /* Expand by more than we need, so we don't do it often. */
+ *recovery_max_size = tdb_expand_adjust(tdb->map_size,
+ *recovery_size,
+ tdb->page_size)
+ - sizeof(rec);
+
+ if (!tdb_add_off_t(recovery_head, sizeof(rec), &new_end) ||
+ !tdb_add_off_t(new_end, *recovery_max_size, &new_end)) {
+ TDB_LOG((tdb, TDB_DEBUG_FATAL, "tdb_recovery_allocate: "
+ "overflow recovery area\n"));
+ return -1;
+ }
+
+ if (methods->tdb_expand_file(tdb, tdb->transaction->old_map_size,
+ new_end - tdb->transaction->old_map_size)
+ == -1) {
+ TDB_LOG((tdb, TDB_DEBUG_FATAL, "tdb_recovery_allocate: failed to create recovery area\n"));
+ return -1;
+ }
+
+ /* remap the file (if using mmap) */
+ methods->tdb_oob(tdb, tdb->map_size, 1, 1);
+
+ /* we have to reset the old map size so that we don't try to expand the file
+ again in the transaction commit, which would destroy the recovery area */
+ tdb->transaction->old_map_size = tdb->map_size;
+
+ /* write the recovery header offset and sync - we can sync without a race here
+ as the magic ptr in the recovery record has not been set */
+ CONVERT(recovery_head);
+ if (methods->tdb_write(tdb, TDB_RECOVERY_HEAD,
+ &recovery_head, sizeof(tdb_off_t)) == -1) {
+ TDB_LOG((tdb, TDB_DEBUG_FATAL, "tdb_recovery_allocate: failed to write recovery head\n"));
+ return -1;
+ }
+ if (transaction_write_existing(tdb, TDB_RECOVERY_HEAD, &recovery_head, sizeof(tdb_off_t)) == -1) {
+ TDB_LOG((tdb, TDB_DEBUG_FATAL, "tdb_recovery_allocate: failed to write recovery head\n"));
+ return -1;
+ }
+
+ return 0;
+}
+
+
+/*
+ setup the recovery data that will be used on a crash during commit
+*/
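+/*
+  The blob built below is laid out as:
+
+	[struct tdb_record][ofs|len|old data][ofs|len|old data]...[tailer]
+
+  each (ofs, len) pair is two 4-byte words followed by len bytes of
+  the *old* file contents at that offset, so replaying the blob
+  restores the pre-transaction state. Note the record header reuses
+  key_len to remember the old file size.
+*/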
+static int transaction_setup_recovery(struct tdb_context *tdb,
+ tdb_off_t *magic_offset)
+{
+ tdb_len_t recovery_size;
+ unsigned char *data, *p;
+ const struct tdb_methods *methods = tdb->transaction->io_methods;
+ struct tdb_record *rec;
+ tdb_off_t recovery_offset, recovery_max_size;
+ tdb_off_t old_map_size = tdb->transaction->old_map_size;
+ uint32_t magic, tailer;
+ int i;
+
+ /*
+ check that the recovery area has enough space
+ */
+ if (tdb_recovery_allocate(tdb, &recovery_size,
+ &recovery_offset, &recovery_max_size) == -1) {
+ return -1;
+ }
+
+ data = (unsigned char *)malloc(recovery_size + sizeof(*rec));
+ if (data == NULL) {
+ tdb->ecode = TDB_ERR_OOM;
+ return -1;
+ }
+
+ rec = (struct tdb_record *)data;
+ memset(rec, 0, sizeof(*rec));
+
+ rec->magic = TDB_RECOVERY_INVALID_MAGIC;
+ rec->data_len = recovery_size;
+ rec->rec_len = recovery_max_size;
+ rec->key_len = old_map_size;
+ CONVERT(*rec);
+
+ /* build the recovery data into a single blob to allow us to do a single
+ large write, which should be more efficient */
+ p = data + sizeof(*rec);
+ for (i=0;i<tdb->transaction->num_blocks;i++) {
+ tdb_off_t offset;
+ tdb_len_t length;
+
+ if (tdb->transaction->blocks[i] == NULL) {
+ continue;
+ }
+
+ offset = i * tdb->transaction->block_size;
+ length = tdb->transaction->block_size;
+ if (i == tdb->transaction->num_blocks-1) {
+ length = tdb->transaction->last_block_size;
+ }
+
+ if (offset >= old_map_size) {
+ continue;
+ }
+ if (offset + length > tdb->transaction->old_map_size) {
+ TDB_LOG((tdb, TDB_DEBUG_FATAL, "tdb_transaction_setup_recovery: transaction data over new region boundary\n"));
+ free(data);
+ tdb->ecode = TDB_ERR_CORRUPT;
+ return -1;
+ }
+ memcpy(p, &offset, 4);
+ memcpy(p+4, &length, 4);
+ if (DOCONV()) {
+ tdb_convert(p, 8);
+ }
+ /* the recovery area contains the old data, not the
+ new data, so we have to call the original tdb_read
+ method to get it */
+ if (methods->tdb_read(tdb, offset, p + 8, length, 0) != 0) {
+ free(data);
+ tdb->ecode = TDB_ERR_IO;
+ return -1;
+ }
+ p += 8 + length;
+ }
+
+ /* and the tailer */
+ tailer = sizeof(*rec) + recovery_max_size;
+ memcpy(p, &tailer, 4);
+ if (DOCONV()) {
+ tdb_convert(p, 4);
+ }
+
+ /* write the recovery data to the recovery area */
+ if (methods->tdb_write(tdb, recovery_offset, data, sizeof(*rec) + recovery_size) == -1) {
+ TDB_LOG((tdb, TDB_DEBUG_FATAL, "tdb_transaction_setup_recovery: failed to write recovery data\n"));
+ free(data);
+ tdb->ecode = TDB_ERR_IO;
+ return -1;
+ }
+ if (transaction_write_existing(tdb, recovery_offset, data, sizeof(*rec) + recovery_size) == -1) {
+ TDB_LOG((tdb, TDB_DEBUG_FATAL, "tdb_transaction_setup_recovery: failed to write secondary recovery data\n"));
+ free(data);
+ tdb->ecode = TDB_ERR_IO;
+ return -1;
+ }
+
+ /* as we don't have ordered writes, we have to sync the recovery
+ data before we update the magic to indicate that the recovery
+ data is present */
+ if (transaction_sync(tdb, recovery_offset, sizeof(*rec) + recovery_size) == -1) {
+ free(data);
+ return -1;
+ }
+
+ free(data);
+
+ magic = TDB_RECOVERY_MAGIC;
+ CONVERT(magic);
+
+ *magic_offset = recovery_offset + offsetof(struct tdb_record, magic);
+
+ if (methods->tdb_write(tdb, *magic_offset, &magic, sizeof(magic)) == -1) {
+ TDB_LOG((tdb, TDB_DEBUG_FATAL, "tdb_transaction_setup_recovery: failed to write recovery magic\n"));
+ tdb->ecode = TDB_ERR_IO;
+ return -1;
+ }
+ if (transaction_write_existing(tdb, *magic_offset, &magic, sizeof(magic)) == -1) {
+ TDB_LOG((tdb, TDB_DEBUG_FATAL, "tdb_transaction_setup_recovery: failed to write secondary recovery magic\n"));
+ tdb->ecode = TDB_ERR_IO;
+ return -1;
+ }
+
+ /* ensure the recovery magic marker is on disk */
+ if (transaction_sync(tdb, *magic_offset, sizeof(magic)) == -1) {
+ return -1;
+ }
+
+ return 0;
+}
+
+static int _tdb_transaction_prepare_commit(struct tdb_context *tdb)
+{
+ const struct tdb_methods *methods;
+
+ if (tdb->transaction == NULL) {
+ TDB_LOG((tdb, TDB_DEBUG_ERROR, "tdb_transaction_prepare_commit: no transaction\n"));
+ return -1;
+ }
+
+ if (tdb->transaction->prepared) {
+ tdb->ecode = TDB_ERR_EINVAL;
+ _tdb_transaction_cancel(tdb);
+ TDB_LOG((tdb, TDB_DEBUG_ERROR, "tdb_transaction_prepare_commit: transaction already prepared\n"));
+ return -1;
+ }
+
+ if (tdb->transaction->transaction_error) {
+ tdb->ecode = TDB_ERR_IO;
+ _tdb_transaction_cancel(tdb);
+ TDB_LOG((tdb, TDB_DEBUG_ERROR, "tdb_transaction_prepare_commit: transaction error pending\n"));
+ return -1;
+ }
+
+
+ if (tdb->transaction->nesting != 0) {
+ return 0;
+ }
+
+ /* check for a null transaction */
+ if (tdb->transaction->blocks == NULL) {
+ return 0;
+ }
+
+ methods = tdb->transaction->io_methods;
+
+ /* if there are any locks pending then the caller has not
+ nested their locks properly, so fail the transaction */
+ if (tdb_have_extra_locks(tdb)) {
+ tdb->ecode = TDB_ERR_LOCK;
+ TDB_LOG((tdb, TDB_DEBUG_ERROR, "tdb_transaction_prepare_commit: locks pending on commit\n"));
+ _tdb_transaction_cancel(tdb);
+ return -1;
+ }
+
+ /* upgrade the main transaction lock region to a write lock */
+ if (tdb_allrecord_upgrade(tdb) == -1) {
+ TDB_LOG((tdb, TDB_DEBUG_ERROR, "tdb_transaction_prepare_commit: failed to upgrade hash locks\n"));
+ _tdb_transaction_cancel(tdb);
+ return -1;
+ }
+
+ /* get the open lock - this prevents new users attaching to the database
+ during the commit */
+ if (tdb_nest_lock(tdb, OPEN_LOCK, F_WRLCK, TDB_LOCK_WAIT) == -1) {
+ TDB_LOG((tdb, TDB_DEBUG_ERROR, "tdb_transaction_prepare_commit: failed to get open lock\n"));
+ _tdb_transaction_cancel(tdb);
+ return -1;
+ }
+
+ /* write the recovery data to the end of the file */
+ if (transaction_setup_recovery(tdb, &tdb->transaction->magic_offset) == -1) {
+ TDB_LOG((tdb, TDB_DEBUG_FATAL, "tdb_transaction_prepare_commit: failed to setup recovery data\n"));
+ _tdb_transaction_cancel(tdb);
+ return -1;
+ }
+
+ tdb->transaction->prepared = true;
+
+ /* expand the file to the new size if needed */
+ if (tdb->map_size != tdb->transaction->old_map_size) {
+ if (methods->tdb_expand_file(tdb, tdb->transaction->old_map_size,
+ tdb->map_size -
+ tdb->transaction->old_map_size) == -1) {
+ tdb->ecode = TDB_ERR_IO;
+ TDB_LOG((tdb, TDB_DEBUG_FATAL, "tdb_transaction_prepare_commit: expansion failed\n"));
+ _tdb_transaction_cancel(tdb);
+ return -1;
+ }
+ tdb->map_size = tdb->transaction->old_map_size;
+ methods->tdb_oob(tdb, tdb->map_size, 1, 1);
+ }
+
+ /* Keep the open lock until the actual commit */
+
+ return 0;
+}
+
+/*
+ prepare to commit the current transaction
+*/
+_PUBLIC_ int tdb_transaction_prepare_commit(struct tdb_context *tdb)
+{
+ tdb_trace(tdb, "tdb_transaction_prepare_commit");
+ return _tdb_transaction_prepare_commit(tdb);
+}
+
+/* A repack is worthwhile if the largest free record is smaller than
+   half the total free space. */
+static bool repack_worthwhile(struct tdb_context *tdb)
+{
+ tdb_off_t ptr;
+ struct tdb_record rec;
+ tdb_len_t total = 0, largest = 0;
+
+ if (tdb_ofs_read(tdb, FREELIST_TOP, &ptr) == -1) {
+ return false;
+ }
+
+ while (ptr != 0 && tdb_rec_free_read(tdb, ptr, &rec) == 0) {
+ total += rec.rec_len;
+ if (rec.rec_len > largest) {
+ largest = rec.rec_len;
+ }
+ ptr = rec.next;
+ }
+
+ return total > largest * 2;
+}
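+
+/* For instance, with free records of 100, 80 and 60 bytes: total = 240,
+   largest = 100, and 240 > 2*100, so the free space is fragmented
+   enough that a repack is judged worthwhile. */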
+
+/*
+ commit the current transaction
+*/
+_PUBLIC_ int tdb_transaction_commit(struct tdb_context *tdb)
+{
+ const struct tdb_methods *methods;
+ int i;
+ bool need_repack = false;
+
+ if (tdb->transaction == NULL) {
+ TDB_LOG((tdb, TDB_DEBUG_ERROR, "tdb_transaction_commit: no transaction\n"));
+ return -1;
+ }
+
+ tdb_trace(tdb, "tdb_transaction_commit");
+
+ if (tdb->transaction->transaction_error) {
+ tdb->ecode = TDB_ERR_IO;
+ _tdb_transaction_cancel(tdb);
+ TDB_LOG((tdb, TDB_DEBUG_ERROR, "tdb_transaction_commit: transaction error pending\n"));
+ return -1;
+ }
+
+
+ if (tdb->transaction->nesting != 0) {
+ tdb->transaction->nesting--;
+ return 0;
+ }
+
+ /* check for a null transaction */
+ if (tdb->transaction->blocks == NULL) {
+ _tdb_transaction_cancel(tdb);
+ return 0;
+ }
+
+ if (!tdb->transaction->prepared) {
+ int ret = _tdb_transaction_prepare_commit(tdb);
+ if (ret)
+ return ret;
+ }
+
+ methods = tdb->transaction->io_methods;
+
+ /* perform all the writes */
+ for (i=0;i<tdb->transaction->num_blocks;i++) {
+ tdb_off_t offset;
+ tdb_len_t length;
+
+ if (tdb->transaction->blocks[i] == NULL) {
+ continue;
+ }
+
+ offset = i * tdb->transaction->block_size;
+ length = tdb->transaction->block_size;
+ if (i == tdb->transaction->num_blocks-1) {
+ length = tdb->transaction->last_block_size;
+ }
+
+ if (methods->tdb_write(tdb, offset, tdb->transaction->blocks[i], length) == -1) {
+ TDB_LOG((tdb, TDB_DEBUG_FATAL, "tdb_transaction_commit: write failed during commit\n"));
+
+ /* we've overwritten part of the data and
+ possibly expanded the file, so we need to
+ run the crash recovery code */
+ tdb->methods = methods;
+ tdb_transaction_recover(tdb);
+
+ _tdb_transaction_cancel(tdb);
+
+ TDB_LOG((tdb, TDB_DEBUG_FATAL, "tdb_transaction_commit: write failed\n"));
+ return -1;
+ }
+ SAFE_FREE(tdb->transaction->blocks[i]);
+ }
+
+ /* Do this before we drop lock or blocks. */
+ if (tdb->transaction->expanded) {
+ need_repack = repack_worthwhile(tdb);
+ }
+
+ SAFE_FREE(tdb->transaction->blocks);
+ tdb->transaction->num_blocks = 0;
+
+ /* ensure the new data is on disk */
+ if (transaction_sync(tdb, 0, tdb->map_size) == -1) {
+ return -1;
+ }
+
+ /*
+ TODO: maybe write to some dummy hdr field, or write to magic
+ offset without mmap, before the last sync, instead of the
+ utime() call
+ */
+
+ /* on some systems (like Linux 2.6.x) changes via mmap/msync
+ don't change the mtime of the file; this means the file may
+ not be backed up (as tdb rounding to block sizes means that
+ file size changes are quite rare too). The following forces
+ mtime changes when a transaction completes */
+#ifdef HAVE_UTIME
+ utime(tdb->name, NULL);
+#endif
+
+ /* use a transaction cancel to free memory and remove the
+ transaction locks */
+ _tdb_transaction_cancel(tdb);
+
+ if (need_repack) {
+ return tdb_repack(tdb);
+ }
+
+ return 0;
+}
+
+
+/*
+ recover from an aborted transaction. Must be called with exclusive
+ database write access already established (including the open
+ lock to prevent new processes attaching)
+*/
+int tdb_transaction_recover(struct tdb_context *tdb)
+{
+ tdb_off_t recovery_head, recovery_eof;
+ unsigned char *data, *p;
+ uint32_t zero = 0;
+ struct tdb_record rec;
+
+ /* find the recovery area */
+ if (tdb_ofs_read(tdb, TDB_RECOVERY_HEAD, &recovery_head) == -1) {
+ TDB_LOG((tdb, TDB_DEBUG_FATAL, "tdb_transaction_recover: failed to read recovery head\n"));
+ tdb->ecode = TDB_ERR_IO;
+ return -1;
+ }
+
+ if (recovery_head == 0) {
+ /* we have never allocated a recovery record */
+ return 0;
+ }
+
+ /* read the recovery record */
+ if (tdb->methods->tdb_read(tdb, recovery_head, &rec,
+ sizeof(rec), DOCONV()) == -1) {
+ TDB_LOG((tdb, TDB_DEBUG_FATAL, "tdb_transaction_recover: failed to read recovery record\n"));
+ tdb->ecode = TDB_ERR_IO;
+ return -1;
+ }
+
+ if (rec.magic != TDB_RECOVERY_MAGIC) {
+ /* there is no valid recovery data */
+ return 0;
+ }
+
+ if (tdb->read_only) {
+ TDB_LOG((tdb, TDB_DEBUG_FATAL, "tdb_transaction_recover: attempt to recover read only database\n"));
+ tdb->ecode = TDB_ERR_CORRUPT;
+ return -1;
+ }
+
+ recovery_eof = rec.key_len;
+
+ data = (unsigned char *)malloc(rec.data_len);
+ if (data == NULL) {
+ TDB_LOG((tdb, TDB_DEBUG_FATAL, "tdb_transaction_recover: failed to allocate recovery data\n"));
+ tdb->ecode = TDB_ERR_OOM;
+ return -1;
+ }
+
+ /* read the full recovery data */
+ if (tdb->methods->tdb_read(tdb, recovery_head + sizeof(rec), data,
+ rec.data_len, 0) == -1) {
+ TDB_LOG((tdb, TDB_DEBUG_FATAL, "tdb_transaction_recover: failed to read recovery data\n"));
+ tdb->ecode = TDB_ERR_IO;
+ return -1;
+ }
+
+ /* recover the file data */
+ p = data;
+ while (p+8 < data + rec.data_len) {
+ uint32_t ofs, len;
+ if (DOCONV()) {
+ tdb_convert(p, 8);
+ }
+ memcpy(&ofs, p, 4);
+ memcpy(&len, p+4, 4);
+
+ if (tdb->methods->tdb_write(tdb, ofs, p+8, len) == -1) {
+ free(data);
+ TDB_LOG((tdb, TDB_DEBUG_FATAL, "tdb_transaction_recover: failed to recover %u bytes at offset %u\n", len, ofs));
+ tdb->ecode = TDB_ERR_IO;
+ return -1;
+ }
+ p += 8 + len;
+ }
+
+ free(data);
+
+ if (transaction_sync(tdb, 0, tdb->map_size) == -1) {
+ TDB_LOG((tdb, TDB_DEBUG_FATAL, "tdb_transaction_recover: failed to sync recovery\n"));
+ tdb->ecode = TDB_ERR_IO;
+ return -1;
+ }
+
+ /* if the recovery area is after the recovered eof then remove it */
+ if (recovery_eof <= recovery_head) {
+ if (tdb_ofs_write(tdb, TDB_RECOVERY_HEAD, &zero) == -1) {
+ TDB_LOG((tdb, TDB_DEBUG_FATAL, "tdb_transaction_recover: failed to remove recovery head\n"));
+ tdb->ecode = TDB_ERR_IO;
+ return -1;
+ }
+ }
+
+ /* remove the recovery magic */
+ if (tdb_ofs_write(tdb, recovery_head + offsetof(struct tdb_record, magic),
+ &zero) == -1) {
+ TDB_LOG((tdb, TDB_DEBUG_FATAL, "tdb_transaction_recover: failed to remove recovery magic\n"));
+ tdb->ecode = TDB_ERR_IO;
+ return -1;
+ }
+
+ if (transaction_sync(tdb, 0, recovery_eof) == -1) {
+ TDB_LOG((tdb, TDB_DEBUG_FATAL, "tdb_transaction_recover: failed to sync2 recovery\n"));
+ tdb->ecode = TDB_ERR_IO;
+ return -1;
+ }
+
+ TDB_LOG((tdb, TDB_DEBUG_TRACE, "tdb_transaction_recover: recovered %u byte database\n",
+ recovery_eof));
+
+ /* all done */
+ return 0;
+}
+
+/* Any I/O failures we say "needs recovery". */
+bool tdb_needs_recovery(struct tdb_context *tdb)
+{
+ tdb_off_t recovery_head;
+ struct tdb_record rec;
+
+ /* find the recovery area */
+ if (tdb_ofs_read(tdb, TDB_RECOVERY_HEAD, &recovery_head) == -1) {
+ return true;
+ }
+
+ if (recovery_head == 0) {
+ /* we have never allocated a recovery record */
+ return false;
+ }
+
+ /* read the recovery record */
+ if (tdb->methods->tdb_read(tdb, recovery_head, &rec,
+ sizeof(rec), DOCONV()) == -1) {
+ return true;
+ }
+
+ return (rec.magic == TDB_RECOVERY_MAGIC);
+}
diff --git a/common/traverse.c b/common/traverse.c
new file mode 100644
index 0000000..618670f
--- /dev/null
+++ b/common/traverse.c
@@ -0,0 +1,366 @@
+ /*
+ Unix SMB/CIFS implementation.
+
+ trivial database library
+
+ Copyright (C) Andrew Tridgell 1999-2005
+ Copyright (C) Paul `Rusty' Russell 2000
+ Copyright (C) Jeremy Allison 2000-2003
+
+ ** NOTE! The following LGPL license applies to the tdb
+ ** library. This does NOT imply that all of Samba is released
+ ** under the LGPL
+
+ This library is free software; you can redistribute it and/or
+ modify it under the terms of the GNU Lesser General Public
+ License as published by the Free Software Foundation; either
+ version 3 of the License, or (at your option) any later version.
+
+ This library is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ Lesser General Public License for more details.
+
+ You should have received a copy of the GNU Lesser General Public
+ License along with this library; if not, see <http://www.gnu.org/licenses/>.
+*/
+
+#include "tdb_private.h"
+
+#define TDB_NEXT_LOCK_ERR ((tdb_off_t)-1)
+
+/* Uses traverse lock: 0 = finish, TDB_NEXT_LOCK_ERR = error,
+ other = record offset */
+static tdb_off_t tdb_next_lock(struct tdb_context *tdb, struct tdb_traverse_lock *tlock,
+ struct tdb_record *rec)
+{
+ int want_next = (tlock->off != 0);
+
+ /* Lock each chain from the start one. */
+ for (; tlock->hash < tdb->hash_size; tlock->hash++) {
+ if (!tlock->off && tlock->hash != 0) {
+ /* this is an optimisation for the common case where
+ the hash chain is empty, which is particularly
+ common for the use of tdb with ldb, where large
+ hashes are used. In that case we spend most of our
+ time in tdb_brlock(), locking empty hash chains.
+
+ To avoid this, we do an unlocked pre-check to see
+ if the hash chain is empty before starting to look
+ inside it. If it is empty then we can avoid that
+ hash chain. If it isn't empty then we can't believe
+ the value we get back, as we read it without a
+ lock, so instead we get the lock and re-fetch the
+ value below.
+
+ Notice that not doing this optimisation on the
+ first hash chain is critical. We must guarantee
+ that we have done at least one fcntl lock at the
+ start of a search to guarantee that memory is
+ coherent on SMP systems. If records are added by
+ others during the search then that's OK, and we
+ could possibly miss those with this trick, but we
+ could miss them anyway without this trick, so the
+ semantics don't change.
+
+ With a non-indexed ldb search this trick gains us a
+ factor of around 80 in speed on a linux 2.6.x
+ system (testing using ldbtest).
+ */
+ tdb->methods->next_hash_chain(tdb, &tlock->hash);
+ if (tlock->hash == tdb->hash_size) {
+ continue;
+ }
+ }
+
+ if (tdb_lock(tdb, tlock->hash, tlock->lock_rw) == -1)
+ return TDB_NEXT_LOCK_ERR;
+
+ /* No previous record? Start at top of chain. */
+ if (!tlock->off) {
+ if (tdb_ofs_read(tdb, TDB_HASH_TOP(tlock->hash),
+ &tlock->off) == -1)
+ goto fail;
+ } else {
+ /* Otherwise unlock the previous record. */
+ if (tdb_unlock_record(tdb, tlock->off) != 0)
+ goto fail;
+ }
+
+ if (want_next) {
+ /* We have offset of old record: grab next */
+ if (tdb_rec_read(tdb, tlock->off, rec) == -1)
+ goto fail;
+ tlock->off = rec->next;
+ }
+
+ /* Iterate through chain */
+ while (tlock->off) {
+ tdb_off_t current;
+ if (tdb_rec_read(tdb, tlock->off, rec) == -1)
+ goto fail;
+
+ /* Detect infinite loops. From "Shlomi Yaakobovich" <Shlomi@exanet.com>. */
+ if (tlock->off == rec->next) {
+ tdb->ecode = TDB_ERR_CORRUPT;
+ TDB_LOG((tdb, TDB_DEBUG_FATAL, "tdb_next_lock: loop detected.\n"));
+ goto fail;
+ }
+
+ if (!TDB_DEAD(rec)) {
+ /* Woohoo: we found one! */
+ if (tdb_lock_record(tdb, tlock->off) != 0)
+ goto fail;
+ return tlock->off;
+ }
+
+ /* Try to clean dead ones from old traverses */
+ current = tlock->off;
+ tlock->off = rec->next;
+ if (!(tdb->read_only || tdb->traverse_read) &&
+ tdb_do_delete(tdb, current, rec) != 0)
+ goto fail;
+ }
+ tdb_unlock(tdb, tlock->hash, tlock->lock_rw);
+ want_next = 0;
+ }
+ /* We finished iteration without finding anything */
+ tdb->ecode = TDB_SUCCESS;
+ return 0;
+
+ fail:
+ tlock->off = 0;
+ if (tdb_unlock(tdb, tlock->hash, tlock->lock_rw) != 0)
+ TDB_LOG((tdb, TDB_DEBUG_FATAL, "tdb_next_lock: On error unlock failed!\n"));
+ return TDB_NEXT_LOCK_ERR;
+}
+
+/* traverse the entire database - calling fn(tdb, key, data) on each element.
+ Returns -1 on error, or the number of records traversed.
+ If fn is NULL then it is not called.
+ A non-zero return value from fn() indicates that the traversal should stop.
+ */
+static int tdb_traverse_internal(struct tdb_context *tdb,
+ tdb_traverse_func fn, void *private_data,
+ struct tdb_traverse_lock *tl)
+{
+ TDB_DATA key, dbuf;
+ struct tdb_record rec;
+ int ret = 0, count = 0;
+ tdb_off_t off;
+
+ /* This was in the initialization, above, but the IRIX compiler
+ * did not like it. crh
+ */
+ tl->next = tdb->travlocks.next;
+
+ /* fcntl locks don't stack: beware traverse inside traverse */
+ tdb->travlocks.next = tl;
+
+ /* tdb_next_lock places locks on the record returned, and its chain */
+ while ((off = tdb_next_lock(tdb, tl, &rec)) != 0) {
+ if (off == TDB_NEXT_LOCK_ERR) {
+ ret = -1;
+ goto out;
+ }
+ count++;
+ /* now read the full record */
+ key.dptr = tdb_alloc_read(tdb, tl->off + sizeof(rec),
+ rec.key_len + rec.data_len);
+ if (!key.dptr) {
+ ret = -1;
+ if (tdb_unlock(tdb, tl->hash, tl->lock_rw) != 0)
+ goto out;
+ if (tdb_unlock_record(tdb, tl->off) != 0)
+ TDB_LOG((tdb, TDB_DEBUG_FATAL, "tdb_traverse: key.dptr == NULL and unlock_record failed!\n"));
+ goto out;
+ }
+ key.dsize = rec.key_len;
+ dbuf.dptr = key.dptr + rec.key_len;
+ dbuf.dsize = rec.data_len;
+
+ tdb_trace_1rec_retrec(tdb, "traverse", key, dbuf);
+
+ /* Drop chain lock, call out */
+ if (tdb_unlock(tdb, tl->hash, tl->lock_rw) != 0) {
+ ret = -1;
+ SAFE_FREE(key.dptr);
+ goto out;
+ }
+ if (fn && fn(tdb, key, dbuf, private_data)) {
+ /* They want us to terminate traversal */
+ tdb_trace_ret(tdb, "tdb_traverse_end", count);
+ if (tdb_unlock_record(tdb, tl->off) != 0) {
+ TDB_LOG((tdb, TDB_DEBUG_FATAL, "tdb_traverse: unlock_record failed!\n"));;
+ ret = -1;
+ }
+ SAFE_FREE(key.dptr);
+ goto out;
+ }
+ SAFE_FREE(key.dptr);
+ }
+ tdb_trace(tdb, "tdb_traverse_end");
+out:
+ tdb->travlocks.next = tl->next;
+ if (ret < 0)
+ return -1;
+ else
+ return count;
+}
+
+
+/*
+ a read style traverse - temporarily marks the db read only
+*/
+_PUBLIC_ int tdb_traverse_read(struct tdb_context *tdb,
+ tdb_traverse_func fn, void *private_data)
+{
+ struct tdb_traverse_lock tl = { NULL, 0, 0, F_RDLCK };
+ int ret;
+
+ /* we need to get a read lock on the transaction lock here to
+ cope with the lock ordering semantics of solaris10 */
+ if (tdb_transaction_lock(tdb, F_RDLCK, TDB_LOCK_WAIT)) {
+ return -1;
+ }
+
+ tdb->traverse_read++;
+ tdb_trace(tdb, "tdb_traverse_read_start");
+ ret = tdb_traverse_internal(tdb, fn, private_data, &tl);
+ tdb->traverse_read--;
+
+ tdb_transaction_unlock(tdb, F_RDLCK);
+
+ return ret;
+}
+
+/*
+ a write style traverse - needs to get the transaction lock to
+ prevent deadlocks
+
+ WARNING: The data buffer given to the callback fn does NOT meet the
+ alignment guarantees malloc gives you.
+*/
+_PUBLIC_ int tdb_traverse(struct tdb_context *tdb,
+ tdb_traverse_func fn, void *private_data)
+{
+ struct tdb_traverse_lock tl = { NULL, 0, 0, F_WRLCK };
+ int ret;
+
+ if (tdb->read_only || tdb->traverse_read) {
+ return tdb_traverse_read(tdb, fn, private_data);
+ }
+
+ if (tdb_transaction_lock(tdb, F_WRLCK, TDB_LOCK_WAIT)) {
+ return -1;
+ }
+
+ tdb->traverse_write++;
+ tdb_trace(tdb, "tdb_traverse_start");
+ ret = tdb_traverse_internal(tdb, fn, private_data, &tl);
+ tdb->traverse_write--;
+
+ tdb_transaction_unlock(tdb, F_WRLCK);
+
+ return ret;
+}
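+
+/*
+  A minimal sketch of a callback for the two traverse calls above;
+  the names are illustrative, but the signature is the public
+  tdb_traverse_func from tdb.h:
+
+	static int count_fn(struct tdb_context *tdb, TDB_DATA key,
+			    TDB_DATA data, void *private_data)
+	{
+		int *count = (int *)private_data;
+		(*count)++;
+		return 0;	(returning non-zero stops the traversal)
+	}
+
+	int count = 0;
+	if (tdb_traverse_read(tdb, count_fn, &count) == -1) {
+		... handle error ...
+	}
+*/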
+
+
+/* find the first entry in the database and return its key */
+_PUBLIC_ TDB_DATA tdb_firstkey(struct tdb_context *tdb)
+{
+ TDB_DATA key;
+ struct tdb_record rec;
+ tdb_off_t off;
+
+ /* release any old lock */
+ if (tdb_unlock_record(tdb, tdb->travlocks.off) != 0)
+ return tdb_null;
+ tdb->travlocks.off = tdb->travlocks.hash = 0;
+ tdb->travlocks.lock_rw = F_RDLCK;
+
+ /* Grab first record: locks chain and returned record. */
+ off = tdb_next_lock(tdb, &tdb->travlocks, &rec);
+ if (off == 0 || off == TDB_NEXT_LOCK_ERR) {
+ tdb_trace_retrec(tdb, "tdb_firstkey", tdb_null);
+ return tdb_null;
+ }
+ /* now read the key */
+ key.dsize = rec.key_len;
+ key.dptr = tdb_alloc_read(tdb, tdb->travlocks.off + sizeof(rec), key.dsize);
+
+ tdb_trace_retrec(tdb, "tdb_firstkey", key);
+
+ /* Unlock the hash chain of the record we just read. */
+ if (tdb_unlock(tdb, tdb->travlocks.hash, tdb->travlocks.lock_rw) != 0)
+ TDB_LOG((tdb, TDB_DEBUG_FATAL, "tdb_firstkey: error occurred while tdb_unlocking!\n"));
+ return key;
+}
+
+/* find the next entry in the database, returning its key */
+_PUBLIC_ TDB_DATA tdb_nextkey(struct tdb_context *tdb, TDB_DATA oldkey)
+{
+ uint32_t oldhash;
+ TDB_DATA key = tdb_null;
+ struct tdb_record rec;
+ unsigned char *k = NULL;
+ tdb_off_t off;
+
+ /* Is locked key the old key? If so, traverse will be reliable. */
+ if (tdb->travlocks.off) {
+ if (tdb_lock(tdb,tdb->travlocks.hash,tdb->travlocks.lock_rw))
+ return tdb_null;
+ if (tdb_rec_read(tdb, tdb->travlocks.off, &rec) == -1
+ || !(k = tdb_alloc_read(tdb,tdb->travlocks.off+sizeof(rec),
+ rec.key_len))
+ || memcmp(k, oldkey.dptr, oldkey.dsize) != 0) {
+ /* No, it wasn't: unlock it and start from scratch */
+ if (tdb_unlock_record(tdb, tdb->travlocks.off) != 0) {
+ tdb_trace_1rec_retrec(tdb, "tdb_nextkey",
+ oldkey, tdb_null);
+ SAFE_FREE(k);
+ return tdb_null;
+ }
+ if (tdb_unlock(tdb, tdb->travlocks.hash, tdb->travlocks.lock_rw) != 0) {
+ SAFE_FREE(k);
+ return tdb_null;
+ }
+ tdb->travlocks.off = 0;
+ }
+
+ SAFE_FREE(k);
+ }
+
+ if (!tdb->travlocks.off) {
+ /* No previous element: do normal find, and lock record */
+ tdb->travlocks.off = tdb_find_lock_hash(tdb, oldkey, tdb->hash_fn(&oldkey), tdb->travlocks.lock_rw, &rec);
+ if (!tdb->travlocks.off) {
+ tdb_trace_1rec_retrec(tdb, "tdb_nextkey", oldkey, tdb_null);
+ return tdb_null;
+ }
+ tdb->travlocks.hash = BUCKET(rec.full_hash);
+ if (tdb_lock_record(tdb, tdb->travlocks.off) != 0) {
+ TDB_LOG((tdb, TDB_DEBUG_FATAL, "tdb_nextkey: lock_record failed (%s)!\n", strerror(errno)));
+ return tdb_null;
+ }
+ }
+ oldhash = tdb->travlocks.hash;
+
+ /* Grab next record: locks chain and returned record,
+ unlocks old record */
+ off = tdb_next_lock(tdb, &tdb->travlocks, &rec);
+ if (off != TDB_NEXT_LOCK_ERR && off != 0) {
+ key.dsize = rec.key_len;
+ key.dptr = tdb_alloc_read(tdb, tdb->travlocks.off+sizeof(rec),
+ key.dsize);
+ /* Unlock the chain of this new record */
+ if (tdb_unlock(tdb, tdb->travlocks.hash, tdb->travlocks.lock_rw) != 0)
+ TDB_LOG((tdb, TDB_DEBUG_FATAL, "tdb_nextkey: WARNING tdb_unlock failed!\n"));
+ }
+ /* Unlock the chain of old record */
+ if (tdb_unlock(tdb, BUCKET(oldhash), tdb->travlocks.lock_rw) != 0)
+ TDB_LOG((tdb, TDB_DEBUG_FATAL, "tdb_nextkey: WARNING tdb_unlock failed!\n"));
+ tdb_trace_1rec_retrec(tdb, "tdb_nextkey", oldkey, key);
+ return key;
+}
+
diff --git a/configure b/configure
new file mode 100755
index 0000000..6a9f875
--- /dev/null
+++ b/configure
@@ -0,0 +1,21 @@
+#!/bin/sh
+
+PREVPATH=$(dirname "$0")
+
+if [ -f "$PREVPATH/../../buildtools/bin/waf" ]; then
+ WAF=../../buildtools/bin/waf
+elif [ -f "$PREVPATH/buildtools/bin/waf" ]; then
+ WAF=./buildtools/bin/waf
+else
+ echo "tdb: Unable to find waf"
+ exit 1
+fi
+
+# using JOBS=1 gives maximum compatibility with
+# systems like AIX which have broken threading in python
+JOBS=1
+export JOBS
+
+cd . || exit 1
+$WAF configure "$@" || exit 1
+cd "$PREVPATH"
diff --git a/docs/README b/docs/README
new file mode 100644
index 0000000..be2224f
--- /dev/null
+++ b/docs/README
@@ -0,0 +1,273 @@
+tdb - a trivial database system
+tridge@linuxcare.com December 1999
+==================================
+
+This is a simple database API. It was inspired by the realisation that
+in Samba we have several ad-hoc bits of code that essentially
+implement small databases for sharing structures between parts of
+Samba. As I was about to add another I realised that a generic
+database module was called for to replace all the ad-hoc bits.
+
+I based the interface on gdbm. I couldn't use gdbm as we need to be
+able to have multiple writers to the databases at one time.
+
+Compilation
+-----------
+
+add HAVE_MMAP=1 to use mmap instead of read/write
+add NOLOCK=1 to disable locking code
+
+Testing
+-------
+
+Compile tdbtest.c and link with gdbm for testing. tdbtest will perform
+identical operations via tdb and gdbm and then make sure the results are
+the same.
+
+Also included is tdbtool, which allows simple database manipulation
+on the command line.
+
+tdbtest and tdbtool are not built as part of Samba, but are included
+for completeness.
+
+Interface
+---------
+
+The interface is very similar to gdbm except for the following:
+
+- different open interface. The tdb_open call is more similar to a
+ traditional open()
+- no gdbm_reorganise() equivalent
+- no gdbm_sync() equivalent. No operations are cached in the library anyway
+- added a tdb_traverse() function for traversing the whole database
+- added transactions support
+
+A general rule for using tdb is that the caller frees any returned
+TDB_DATA structures. Just call free(p.dptr) to free a TDB_DATA
+return value called p. This is the same as gdbm.
+
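+For example, a fetch-and-free sketch (assuming an already opened
+TDB_CONTEXT *tdb and a filled-in TDB_DATA key):
+
+    TDB_DATA p = tdb_fetch(tdb, key);
+    if (p.dptr) {
+            /* use p.dptr and p.dsize here ... */
+            free(p.dptr);    /* the caller frees the returned data */
+    }
+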
+Here is a full list of tdb functions with brief descriptions.
+
+
+----------------------------------------------------------------------
+TDB_CONTEXT *tdb_open(char *name, int hash_size, int tdb_flags,
+ int open_flags, mode_t mode)
+
+ open the database, creating it if necessary
+
+ The open_flags and mode are passed straight to the open call on the database
+ file. A flags value of O_WRONLY is invalid.
+
+ The hash size is advisory; use zero for a default value.
+
+ return is NULL on error
+
+ possible tdb_flags are:
+ TDB_CLEAR_IF_FIRST - clear database if we are the only one with it open
+ TDB_INTERNAL - don't use a file, instead store the data in
+ memory. The filename is ignored in this case.
+ TDB_NOLOCK - don't do any locking
+ TDB_NOMMAP - don't use mmap
+ TDB_NOSYNC - don't synchronise transactions to disk
+ TDB_SEQNUM - maintain a sequence number
+ TDB_VOLATILE - activate the per-hashchain freelist, default 5
+ TDB_ALLOW_NESTING - allow transactions to nest
+ TDB_DISALLOW_NESTING - disallow transactions to nest
+
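+ A minimal open/close sketch (the filename "demo.tdb" and the flag
+ choices are examples only; error handling abbreviated):
+
+    #include <fcntl.h>
+    #include <tdb.h>
+
+    TDB_CONTEXT *tdb = tdb_open("demo.tdb", 0, TDB_CLEAR_IF_FIRST,
+                                O_RDWR | O_CREAT, 0600);
+    if (!tdb) {
+            /* open failed */
+    }
+    tdb_close(tdb);
+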
+----------------------------------------------------------------------
+TDB_CONTEXT *tdb_open_ex(char *name, int hash_size, int tdb_flags,
+ int open_flags, mode_t mode,
+ const struct tdb_logging_context *log_ctx,
+ tdb_hash_func hash_fn)
+
+This is like tdb_open(), but allows you to pass an initial logging and
+hash function. Be careful when passing a hash function - all users of
+the database must use the same hash function or you will get data
+corruption.
+
+
+----------------------------------------------------------------------
+char *tdb_error(TDB_CONTEXT *tdb);
+
+ return an error string for the last tdb error
+
+----------------------------------------------------------------------
+int tdb_close(TDB_CONTEXT *tdb);
+
+ close a database
+
+----------------------------------------------------------------------
+TDB_DATA tdb_fetch(TDB_CONTEXT *tdb, TDB_DATA key);
+
+ fetch an entry in the database given a key
+ if the return value has a null dptr then an error occurred
+
+ caller must free the resulting data
+
+----------------------------------------------------------------------
+int tdb_parse_record(struct tdb_context *tdb, TDB_DATA key,
+ int (*parser)(TDB_DATA key, TDB_DATA data,
+ void *private_data),
+ void *private_data);
+
+ Hand a record to a parser function without allocating it.
+
+ This function is meant as a fast tdb_fetch alternative for large records
+ that are frequently read. The "key" and "data" arguments point directly
+ into the tdb shared memory; they are not aligned at any boundary.
+
+ WARNING: The parser is called while tdb holds a lock on the record. DO NOT
+ call other tdb routines from within the parser. Also, for good performance
+ you should make the parser fast to allow parallel operations.
+
+ tdb_parse_record returns -1 if the record was not found. If the record was
+ found, the return value of "parser" is passed up to the caller.
+
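+ A parser callback sketch (the names fetch_size/size are illustrative):
+
+    static int fetch_size(TDB_DATA key, TDB_DATA data, void *private_data)
+    {
+            size_t *size = (size_t *)private_data;
+            /* copy out what is needed; data.dptr is only valid here */
+            *size = data.dsize;
+            return 0;
+    }
+
+    size_t size;
+    int ret = tdb_parse_record(tdb, key, fetch_size, &size);
+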
+----------------------------------------------------------------------
+int tdb_exists(TDB_CONTEXT *tdb, TDB_DATA key);
+
+ check if an entry in the database exists
+
+ note that 1 is returned if the key is found and 0 is returned if not found
+ this doesn't match the conventions in the rest of this module, but is
+ compatible with gdbm
+
+----------------------------------------------------------------------
+int tdb_traverse(TDB_CONTEXT *tdb, int (*fn)(TDB_CONTEXT *tdb,
+ TDB_DATA key, TDB_DATA dbuf, void *state), void *state);
+
+ traverse the entire database - calling fn(tdb, key, data, state) on each
+ element.
+
+ return -1 on error or the record count traversed
+
+ if fn is NULL then it is not called
+
+ a non-zero return value from fn() indicates that the traversal
+ should stop. Traversal callbacks may not start transactions.
+
+ WARNING: The data buffer given to the callback fn does NOT meet the
+ alignment restrictions malloc gives you.
+
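+ A traversal callback sketch that counts records (assuming an open tdb;
+ the names are illustrative):
+
+    static int count_record(TDB_CONTEXT *tdb, TDB_DATA key, TDB_DATA dbuf,
+                            void *state)
+    {
+            int *count = (int *)state;
+            (*count)++;
+            return 0;    /* non-zero would stop the traversal */
+    }
+
+    int count = 0;
+    if (tdb_traverse(tdb, count_record, &count) == -1) {
+            /* traversal failed */
+    }
+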
+----------------------------------------------------------------------
+int tdb_traverse_read(TDB_CONTEXT *tdb, int (*fn)(TDB_CONTEXT *tdb,
+ TDB_DATA key, TDB_DATA dbuf, void *state), void *state);
+
+ traverse the entire database - calling fn(tdb, key, data, state) on
+ each element, but marking the database read only during the
+ traversal, so any write operations will fail. This allows tdb to
+ use read locks, which increases the parallelism possible during the
+ traversal.
+
+ return -1 on error or the record count traversed
+
+ if fn is NULL then it is not called
+
+ a non-zero return value from fn() indicates that the traversal
+ should stop. Traversal callbacks may not start transactions.
+
+----------------------------------------------------------------------
+TDB_DATA tdb_firstkey(TDB_CONTEXT *tdb);
+
+ find the first entry in the database and return its key
+
+ the caller must free the returned data
+
+----------------------------------------------------------------------
+TDB_DATA tdb_nextkey(TDB_CONTEXT *tdb, TDB_DATA key);
+
+ find the next entry in the database, returning its key
+
+ the caller must free the returned data
+
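+ A typical iteration sketch using these two calls (assuming an open tdb;
+ note that each returned key must be freed by the caller):
+
+    TDB_DATA key, next;
+
+    for (key = tdb_firstkey(tdb); key.dptr; key = next) {
+            /* use key.dptr and key.dsize here ... */
+            next = tdb_nextkey(tdb, key);
+            free(key.dptr);
+    }
+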
+----------------------------------------------------------------------
+int tdb_delete(TDB_CONTEXT *tdb, TDB_DATA key);
+
+ delete an entry in the database given a key
+
+----------------------------------------------------------------------
+int tdb_store(TDB_CONTEXT *tdb, TDB_DATA key, TDB_DATA dbuf, int flag);
+
+ store an element in the database, replacing any existing element
+ with the same key
+
+ If flag==TDB_INSERT then don't overwrite an existing entry
+ If flag==TDB_MODIFY then don't create a new entry
+
+ return 0 on success, -1 on failure
+
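+ A store sketch (assuming an open tdb; the literal key and value are
+ examples, and TDB_REPLACE is the plain overwrite flag):
+
+    TDB_DATA key, value;
+
+    key.dptr = (unsigned char *)"hello";
+    key.dsize = 5;
+    value.dptr = (unsigned char *)"world";
+    value.dsize = 5;
+    if (tdb_store(tdb, key, value, TDB_REPLACE) == -1) {
+            /* store failed */
+    }
+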
+----------------------------------------------------------------------
+int tdb_writelock(TDB_CONTEXT *tdb);
+
+ lock the database. If we already have it locked then don't do anything
+
+----------------------------------------------------------------------
+int tdb_writeunlock(TDB_CONTEXT *tdb);
+
+ unlock the database
+
+----------------------------------------------------------------------
+int tdb_lockchain(TDB_CONTEXT *tdb, TDB_DATA key);
+
+ lock one hash chain. This is meant to be used to reduce locking
+ contention - it cannot guarantee how many records will be locked
+
+----------------------------------------------------------------------
+int tdb_unlockchain(TDB_CONTEXT *tdb, TDB_DATA key);
+
+ unlock one hash chain
+
+----------------------------------------------------------------------
+int tdb_transaction_start(TDB_CONTEXT *tdb)
+
+ start a transaction. All operations after the transaction start can
+ either be committed with tdb_transaction_commit() or cancelled with
+ tdb_transaction_cancel().
+
+ If you call tdb_transaction_start() again on the same tdb context
+ while a transaction is in progress, then the same transaction
+ buffer is re-used. The number of tdb_transaction_{commit,cancel}
+ operations must match the number of successful
+ tdb_transaction_start() calls.
+
+ Note that transactions are by default disk synchronous, and use a
+ recover area in the database to automatically recover the database
+ on the next open if the system crashes during a transaction. You
+ can disable the synchronous transaction recovery setup using the
+ TDB_NOSYNC flag, which will greatly speed up operations at the risk
+ of corrupting your database if the system crashes.
+
+ Operations made within a transaction are not visible to other users
+ of the database until a successful commit.
+
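+ A commit/cancel sketch (assuming an open tdb and prepared key/value
+ TDB_DATA structures; error handling abbreviated):
+
+    if (tdb_transaction_start(tdb) == -1) {
+            /* could not start the transaction */
+    } else if (tdb_store(tdb, key, value, TDB_REPLACE) == -1) {
+            tdb_transaction_cancel(tdb);    /* discard all changes */
+    } else if (tdb_transaction_commit(tdb) == -1) {
+            /* commit failed; the database is left unchanged */
+    }
+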
+----------------------------------------------------------------------
+int tdb_transaction_cancel(TDB_CONTEXT *tdb)
+
+ cancel a current transaction, discarding all write and lock
+ operations that have been made since the transaction started.
+
+
+----------------------------------------------------------------------
+int tdb_transaction_commit(TDB_CONTEXT *tdb)
+
+ commit a current transaction, updating the database and releasing
+ the transaction locks.
+
+----------------------------------------------------------------------
+int tdb_transaction_prepare_commit(TDB_CONTEXT *tdb)
+
+ prepare to commit a current transaction, for two-phase commits.
+ Once prepared for commit, the only allowed calls are
+ tdb_transaction_commit() or tdb_transaction_cancel(). Preparing
+ allocates disk space for the pending updates, so a subsequent
+ commit should succeed (barring any hardware failures).
+
+----------------------------------------------------------------------
+int tdb_check(TDB_CONTEXT *tdb,
+ int (*check)(TDB_DATA key, TDB_DATA data, void *private_data),
+ void *private_data);
+
+ check the consistency of the database, calling back the check function
+ (if non-NULL) with each record. If some consistency check fails, or
+ the supplied check function returns -1, tdb_check returns -1, otherwise
+ 0. Note that the logging function (if set) will be called with additional
+ information on the corruption found.
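+
+ A check callback sketch (the name check_record is illustrative; a -1
+ return marks the record as bad):
+
+    static int check_record(TDB_DATA key, TDB_DATA data, void *private_data)
+    {
+            if (key.dsize == 0) {
+                    return -1;    /* treat an empty key as corruption */
+            }
+            return 0;
+    }
+
+    if (tdb_check(tdb, check_record, NULL) == -1) {
+            /* the database is corrupt */
+    }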
diff --git a/docs/mainpage.dox b/docs/mainpage.dox
new file mode 100644
index 0000000..d130769
--- /dev/null
+++ b/docs/mainpage.dox
@@ -0,0 +1,61 @@
+/**
+
+@mainpage
+
+This is a simple database API. It was inspired by the realisation that in Samba
+we have several ad-hoc bits of code that essentially implement small databases
+for sharing structures between parts of Samba.
+
+The interface is based on gdbm. gdbm couldn't be used as we needed to be
+able to have multiple writers to the databases at one time.
+
+@section tdb_download Download
+
+You can download the latest releases of tdb from the
+<a href="http://samba.org/ftp/tdb">tdb directory</a> on the samba public source
+archive.
+
+You can download the latest code either via git or rsync.
+
+To fetch via git see the following guide:
+
+<a href="http://wiki.samba.org/index.php/Using_Git_for_Samba_Development">Using Git for Samba Development</a>
+Once you have cloned the tree, switch to the master branch and cd into the source/lib/tdb directory.
+
+To fetch via rsync use these commands:
+
+<pre>
+ rsync -Pavz samba.org::ftp/unpacked/standalone_projects/lib/tdb .
+ rsync -Pavz samba.org::ftp/unpacked/standalone_projects/lib/replace .
+</pre>
+
+and build in tdb. It will find the replace library in the directory above
+automatically.
+
+@section tdb_bugs Discussion and bug reports
+
+tdb does not currently have its own mailing list or bug tracking system. For now,
+please use the
+<a href="https://lists.samba.org/mailman/listinfo/samba-technical">samba-technical</a>
+mailing list, and the <a href="http://bugzilla.samba.org/">Samba bugzilla</a> bug
+tracking system.
+
+
+@section tdb_compilation Compilation
+
+add HAVE_MMAP=1 to use mmap instead of read/write
+add NOLOCK=1 to disable locking code
+
+@section tdb_testing Testing
+
+Compile tdbtest.c and link with gdbm for testing. tdbtest will perform
+identical operations via tdb and gdbm and then make sure the results are
+the same.
+
+Also included is tdbtool, which allows simple database manipulation
+on the commandline.
+
+tdbtest and tdbtool are not built as part of Samba, but are included
+for completeness.
+
+*/
diff --git a/docs/mutex.txt b/docs/mutex.txt
new file mode 100644
index 0000000..7625662
--- /dev/null
+++ b/docs/mutex.txt
@@ -0,0 +1,136 @@
+Tdb is a hashtable database with support for multiple concurrent writers and
+external record locks. For speed reasons, wherever possible tdb uses a shared
+memory mapped area for data access. In its currently released form, it uses
+fcntl byte-range locks to coordinate access to the data itself.
+
+The tdb data is organized as a hashtable. Hash collisions are dealt with by
+forming a linked list of records that share a hash value. The individual
+linked lists are protected across processes with 1-byte fcntl locks on the
+starting pointer of the linked list representing a hash value.
+
+The external locking API of tdb allows the caller to lock individual records.
+Instead of really locking individual records, the tdb API locks a complete
+linked list with an fcntl lock.
+
+The external locking API of tdb also allows locking the complete database,
+and ctdb uses this facility to freeze databases during a recovery. While the
+so-called allrecord lock is held, all linked lists and all individual records
+are frozen altogether. Tdb achieves this by locking the complete file range
+with a single fcntl lock. Individual 1-byte locks for the linked lists
+conflict with this. Access to records is prevented by the one large fcntl
+byte-range lock.
+
+Fcntl locks have been chosen for tdb for two reasons: First, they are portable
+across all current unixes. Secondly, they provide auto-cleanup. If a process
+dies while holding a fcntl lock, the lock is given up as if it had been
+explicitly unlocked. Thus fcntl locks provide a very robust locking scheme:
+if a process dies for any reason, the database will not stay blocked until
+reboot. This robustness is very important for long-running services; a reboot
+is not an option for most users of tdb.
+
+Unfortunately, during stress testing, fcntl locks have turned out to be a major
+problem for performance. The particular problem that was seen happens when
+ctdb on a busy server does a recovery. A recovery means that ctdb has to
+freeze all tdb databases for some time, usually a few seconds. This is done
+with the allrecord lock. During the recovery phase on a busy server many smbd
+processes try to access the tdb file with blocking fcntl calls. The specific
+test in question easily reproduces 7,000 processes piling up waiting for
+1-byte fcntl locks. When ctdb is done with the recovery, it gives up the
+allrecord lock, covering the whole file range. All 7,000 processes waiting for
+1-byte fcntl locks are woken up, trying to acquire their lock. The special
+implementation of fcntl locks in Linux (up to 2013-02-12 at least) protects
+all fcntl lock operations with a single system-wide spinlock. If 7,000
+processes wait for the allrecord lock to be released, this leads to a
+thundering herd condition: all CPUs are spinning on that single spinlock.
+
+Functionally the kernel is fine, eventually the thundering herd slows down and
+every process correctly gets its share and locking range, but the performance
+of the system while the herd is active is worse than expected.
+
+The thundering herd is only the worst case scenario for fcntl lock use. The
+single spinlock for fcntl operations is also a performance penalty for normal
+operations. In the cluster case, every read and write SMB request has to do
+two fcntl calls to provide correct SMB mandatory locks. The single spinlock
+is one source of serialization for the SMB read/write requests, limiting the
+parallelism that can be achieved in a multi-core system.
+
+While trying to tune his servers, Ira Cooper, a Samba Team member, found fcntl
+locks to be a problem on Solaris as well. Ira pointed out that there is a
+potential alternative locking mechanism that might be more scalable: Process
+shared robust mutexes, as defined by Posix 2008 for example via
+
+http://pubs.opengroup.org/onlinepubs/9699919799/functions/pthread_mutexattr_setpshared.html
+http://pubs.opengroup.org/onlinepubs/9699919799/functions/pthread_mutexattr_setrobust.html
+
+Pthread mutexes provide one of the core mechanisms in posix threads to protect
+in-process data structures from concurrent access by multiple threads. In the
+Linux implementation, a pthread_mutex_t is represented by a data structure in
+user space that requires no kernel calls in the uncontended case for locking
+and unlocking. Locking and unlocking in the uncontended case are implemented
+purely in user space with atomic CPU instructions and are thus very fast.
+
+The setpshared functions indicate to the kernel that the mutex is about to be
+shared between processes in a common shared memory area.
+
+The process shared posix mutexes have the potential to replace fcntl locking
+to coordinate mmap access for tdbs. However, they are missing the critical
+auto-cleanup property that fcntl provides when a process dies. A process that
+dies hard while holding a shared mutex has no chance to clean up the protected
+data structures and unlock the shared mutex. Thus with a pure process shared
+mutex the mutex will remain locked forever until the data structures are
+re-initialized from scratch.
+
+With the robust mutexes defined by Posix the process shared mutexes have been
+extended with a limited auto-cleanup property. If a mutex has been declared
+robust, when a process exits while holding that mutex, the next process trying
+to lock the mutex will get the special error code EOWNERDEAD. This informs
+the caller that the data structures the mutex protects are potentially corrupt
+and need to be cleaned up.
+
+The error code EOWNERDEAD when trying to lock a mutex is an extension over
+the fcntl functionality. A process that does a blocking fcntl lock call is not
+informed about whether the lock was explicitly freed by a process still alive
+or due to an unplanned process exit. At the time of this writing (February
+2013), at least Linux and OpenSolaris also implement the robustness feature of
+process-shared mutexes.
+
+Converting the tdb locking mechanism from fcntl to mutexes has to take care of
+both types of locks that are used on tdb files.
+
+The easy part is to use mutexes to replace the 1-byte linked list locks
+covering the individual hashes. Those can be represented by a mutex each.
+
+Covering the allrecord lock is more difficult. The allrecord lock uses a fcntl
+lock spanning all hash list locks simultaneously. This basic functionality is
+not easily possible with mutexes. A mutex carries 1 bit of information; an
+fcntl lock can carry an arbitrary amount of information.
+
+In order to support the allrecord lock, we have an allrecord_lock variable
+protected by an allrecord_mutex. The coordination between the allrecord lock
+and the chainlocks works like this (a rough C sketch follows the lists below):
+
+- Getting a chain lock works like this:
+
+ 1. get chain mutex
+ 2. return success if allrecord_lock is F_UNLCK (not locked)
+ 3. return success if allrecord_lock is F_RDLCK (locked readonly)
+ and we only need a read lock.
+ 4. release chain mutex
+ 5. wait for allrecord_mutex
+ 6. unlock allrecord_mutex
+ 7. goto 1.
+
+- Getting the allrecord lock:
+
+ 1. get the allrecord mutex
+ 2. return error if allrecord_lock is not F_UNLCK (it's locked)
+ 3. set allrecord_lock to the desired value.
+ 4. in a loop: lock(blocking) / unlock each chain mutex.
+ 5. return success.
+
+- allrecord lock upgrade:
+
+ 1. check we already have the allrecord lock with F_RDLCK.
+ 2. set allrecord_lock to F_WRLCK
+ 3. in a loop: lock(blocking) / unlock each chain mutex.
+ 4. return success.
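+
+The chain-lock loop above could be sketched in C roughly as follows. This is
+an illustration only, not the actual tdb implementation; the struct layout
+and the names (chain_mutex, allrecord_mutex, NUM_CHAINS) are invented for
+the sketch:
+
+  #include <fcntl.h>
+  #include <pthread.h>
+
+  #define NUM_CHAINS 131
+
+  struct db {
+          pthread_mutex_t chain_mutex[NUM_CHAINS];
+          pthread_mutex_t allrecord_mutex;
+          int allrecord_lock;    /* F_UNLCK, F_RDLCK or F_WRLCK */
+  };
+
+  static int chain_lock(struct db *db, unsigned int hash, int rw)
+  {
+          for (;;) {
+                  pthread_mutex_lock(&db->chain_mutex[hash]);     /* 1. */
+                  if (db->allrecord_lock == F_UNLCK)
+                          return 0;                               /* 2. */
+                  if (db->allrecord_lock == F_RDLCK && rw == F_RDLCK)
+                          return 0;                               /* 3. */
+                  pthread_mutex_unlock(&db->chain_mutex[hash]);   /* 4. */
+                  pthread_mutex_lock(&db->allrecord_mutex);       /* 5. */
+                  pthread_mutex_unlock(&db->allrecord_mutex);     /* 6. */
+          }                                                       /* 7. */
+  }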
diff --git a/docs/tdb.magic b/docs/tdb.magic
new file mode 100644
index 0000000..f5619e7
--- /dev/null
+++ b/docs/tdb.magic
@@ -0,0 +1,10 @@
+# Magic file(1) information about tdb files.
+#
+# Install this into /etc/magic or the corresponding location for your
+# system, or pass as a -m argument to file(1).
+
+# You may use and redistribute this file without restriction.
+
+0 string TDB\ file TDB database
+>32 lelong =0x2601196D version 6, little-endian
+>>36 lelong x hash size %d bytes
diff --git a/docs/tracing.txt b/docs/tracing.txt
new file mode 100644
index 0000000..98c5db9
--- /dev/null
+++ b/docs/tracing.txt
@@ -0,0 +1,46 @@
+How And Why To Use TDB Tracing
+==============================
+
+You can trace all TDB operations, using TDB_TRACE. It is not complete
+(error conditions which are expected to be logged will not always be traced
+correctly, so you should set up a logging function too), but is designed
+to collect benchmark-style traces to allow us to optimize TDB.
+
+Note: tracing is not efficient, and the trace files are huge: a
+traverse of the database is particularly large! But they compress very
+well with rzip (http://rzip.samba.org)
+
+How to gather trace files:
+--------------------------
+1) Uncomment /* #define TDB_TRACE 1 */ in tdb_private.h.
+2) Rebuild TDB, and everything that uses it.
+3) Run something.
+
+Your trace files will be called <tdbname>.trace.<pid>. These files
+will not be overwritten: if the same process reopens the same TDB, an
+error will be logged and tracing will be disabled.
+
+How to replay trace files:
+--------------------------
+1) For benchmarking, remember to rebuild tdb with #define TDB_TRACE commented
+ out again!
+2) Grab the latest "replace_trace.c" from CCAN's tdb module (tools/ dir):
+ http://ccan.ozlabs.org/tarballs/tdb.tar.bz2
+3) Compile up replay_trace, munging as necessary.
+4) Run replay_trace <scratch-tdb-name> <tracefiles>...
+
+If given more than one trace file (presumably from the same tdb)
+replay_trace will try to figure out the dependencies between the operations
+and fire off a child to run each trace. Occasionally it gets stuck, in
+which case it will add another dependency and retry. Eventually it will
+give a speed value.
+
+replay_trace can intuit the existence of previous data in the tdb (i.e.
+activity prior to the trace(s) supplied) and will prepopulate as
+necessary.
+
+You can run --quiet for straight benchmark results, and -n to run multiple
+times (this saves time, since it need only calculate dependencies once).
+
+Good luck!
+Rusty Russell <rusty@rustcorp.com.au>
diff --git a/doxy.config b/doxy.config
new file mode 100644
index 0000000..f55e9c3
--- /dev/null
+++ b/doxy.config
@@ -0,0 +1,1697 @@
+# Doxyfile 1.7.3
+
+# This file describes the settings to be used by the documentation system
+# doxygen (www.doxygen.org) for a project.
+#
+# All text after a hash (#) is considered a comment and will be ignored.
+# The format is:
+# TAG = value [value, ...]
+# For lists items can also be appended using:
+# TAG += value [value, ...]
+# Values that contain spaces should be placed between quotes (" ").
+
+#---------------------------------------------------------------------------
+# Project related configuration options
+#---------------------------------------------------------------------------
+
+# This tag specifies the encoding used for all characters in the config file
+# that follow. The default is UTF-8 which is also the encoding used for all
+# text before the first occurrence of this tag. Doxygen uses libiconv (or the
+# iconv built into libc) for the transcoding. See
+# http://www.gnu.org/software/libiconv for the list of possible encodings.
+
+DOXYFILE_ENCODING = UTF-8
+
+# The PROJECT_NAME tag is a single word (or a sequence of words surrounded
+# by quotes) that should identify the project.
+
+PROJECT_NAME = tdb
+
+# The PROJECT_NUMBER tag can be used to enter a project or revision number.
+# This could be handy for archiving the generated documentation or
+# if some version control system is used.
+
+PROJECT_NUMBER = 1.2.9
+
+# Using the PROJECT_BRIEF tag one can provide an optional one line description
+# for a project that appears at the top of each page and should give the
+# viewer a quick idea about the purpose of the project. Keep the description
+# short.
+
+PROJECT_BRIEF =
+
+# With the PROJECT_LOGO tag one can specify a logo or icon that is
+# included in the documentation. The maximum height of the logo should not
+# exceed 55 pixels and the maximum width should not exceed 200 pixels.
+# Doxygen will copy the logo to the output directory.
+
+PROJECT_LOGO =
+
+# The OUTPUT_DIRECTORY tag is used to specify the (relative or absolute)
+# base path where the generated documentation will be put.
+# If a relative path is entered, it will be relative to the location
+# where doxygen was started. If left blank the current directory will be used.
+
+OUTPUT_DIRECTORY = docs
+
+# If the CREATE_SUBDIRS tag is set to YES, then doxygen will create
+# 4096 sub-directories (in 2 levels) under the output directory of each output
+# format and will distribute the generated files over these directories.
+# Enabling this option can be useful when feeding doxygen a huge amount of
+# source files, where putting all generated files in the same directory would
+# otherwise cause performance problems for the file system.
+
+CREATE_SUBDIRS = NO
+
+# The OUTPUT_LANGUAGE tag is used to specify the language in which all
+# documentation generated by doxygen is written. Doxygen will use this
+# information to generate all constant output in the proper language.
+# The default language is English, other supported languages are:
+# Afrikaans, Arabic, Brazilian, Catalan, Chinese, Chinese-Traditional,
+# Croatian, Czech, Danish, Dutch, Esperanto, Farsi, Finnish, French, German,
+# Greek, Hungarian, Italian, Japanese, Japanese-en (Japanese with English
+# messages), Korean, Korean-en, Lithuanian, Norwegian, Macedonian, Persian,
+# Polish, Portuguese, Romanian, Russian, Serbian, Serbian-Cyrillic, Slovak,
+# Slovene, Spanish, Swedish, Ukrainian, and Vietnamese.
+
+OUTPUT_LANGUAGE = English
+
+# If the BRIEF_MEMBER_DESC tag is set to YES (the default) Doxygen will
+# include brief member descriptions after the members that are listed in
+# the file and class documentation (similar to JavaDoc).
+# Set to NO to disable this.
+
+BRIEF_MEMBER_DESC = YES
+
+# If the REPEAT_BRIEF tag is set to YES (the default) Doxygen will prepend
+# the brief description of a member or function before the detailed description.
+# Note: if both HIDE_UNDOC_MEMBERS and BRIEF_MEMBER_DESC are set to NO, the
+# brief descriptions will be completely suppressed.
+
+REPEAT_BRIEF = YES
+
+# This tag implements a quasi-intelligent brief description abbreviator
+# that is used to form the text in various listings. Each string
+# in this list, if found as the leading text of the brief description, will be
+# stripped from the text and the result after processing the whole list, is
+# used as the annotated text. Otherwise, the brief description is used as-is.
+# If left blank, the following values are used ("$name" is automatically
+# replaced with the name of the entity): "The $name class" "The $name widget"
+# "The $name file" "is" "provides" "specifies" "contains"
+# "represents" "a" "an" "the"
+
+ABBREVIATE_BRIEF = "The $name class" \
+ "The $name widget" \
+ "The $name file" \
+ is \
+ provides \
+ specifies \
+ contains \
+ represents \
+ a \
+ an \
+ the
+
+# If the ALWAYS_DETAILED_SEC and REPEAT_BRIEF tags are both set to YES then
+# Doxygen will generate a detailed section even if there is only a brief
+# description.
+
+ALWAYS_DETAILED_SEC = NO
+
+# If the INLINE_INHERITED_MEMB tag is set to YES, doxygen will show all
+# inherited members of a class in the documentation of that class as if those
+# members were ordinary class members. Constructors, destructors and assignment
+# operators of the base classes will not be shown.
+
+INLINE_INHERITED_MEMB = NO
+
+# If the FULL_PATH_NAMES tag is set to YES then Doxygen will prepend the full
+# path before files name in the file list and in the header files. If set
+# to NO the shortest path that makes the file name unique will be used.
+
+FULL_PATH_NAMES = YES
+
+# If the FULL_PATH_NAMES tag is set to YES then the STRIP_FROM_PATH tag
+# can be used to strip a user-defined part of the path. Stripping is
+# only done if one of the specified strings matches the left-hand part of
+# the path. The tag can be used to show relative paths in the file list.
+# If left blank the directory from which doxygen is run is used as the
+# path to strip.
+
+STRIP_FROM_PATH =
+
+# The STRIP_FROM_INC_PATH tag can be used to strip a user-defined part of
+# the path mentioned in the documentation of a class, which tells
+# the reader which header file to include in order to use a class.
+# If left blank only the name of the header file containing the class
+# definition is used. Otherwise one should specify the include paths that
+# are normally passed to the compiler using the -I flag.
+
+STRIP_FROM_INC_PATH =
+
+# If the SHORT_NAMES tag is set to YES, doxygen will generate much shorter
+# (but less readable) file names. This can be useful if your file system
+# doesn't support long names like on DOS, Mac, or CD-ROM.
+
+SHORT_NAMES = NO
+
+# If the JAVADOC_AUTOBRIEF tag is set to YES then Doxygen
+# will interpret the first line (until the first dot) of a JavaDoc-style
+# comment as the brief description. If set to NO, the JavaDoc
+# comments will behave just like regular Qt-style comments
+# (thus requiring an explicit @brief command for a brief description.)
+
+JAVADOC_AUTOBRIEF = YES
+
+# If the QT_AUTOBRIEF tag is set to YES then Doxygen will
+# interpret the first line (until the first dot) of a Qt-style
+# comment as the brief description. If set to NO, the comments
+# will behave just like regular Qt-style comments (thus requiring
+# an explicit \brief command for a brief description.)
+
+QT_AUTOBRIEF = NO
+
+# The MULTILINE_CPP_IS_BRIEF tag can be set to YES to make Doxygen
+# treat a multi-line C++ special comment block (i.e. a block of //! or ///
+# comments) as a brief description. This used to be the default behaviour.
+# The new default is to treat a multi-line C++ comment block as a detailed
+# description. Set this tag to YES if you prefer the old behaviour instead.
+
+MULTILINE_CPP_IS_BRIEF = NO
+
+# If the INHERIT_DOCS tag is set to YES (the default) then an undocumented
+# member inherits the documentation from any documented member that it
+# re-implements.
+
+INHERIT_DOCS = YES
+
+# If the SEPARATE_MEMBER_PAGES tag is set to YES, then doxygen will produce
+# a new page for each member. If set to NO, the documentation of a member will
+# be part of the file/class/namespace that contains it.
+
+SEPARATE_MEMBER_PAGES = NO
+
+# The TAB_SIZE tag can be used to set the number of spaces in a tab.
+# Doxygen uses this value to replace tabs by spaces in code fragments.
+
+TAB_SIZE = 8
+
+# This tag can be used to specify a number of aliases that acts
+# as commands in the documentation. An alias has the form "name=value".
+# For example adding "sideeffect=\par Side Effects:\n" will allow you to
+# put the command \sideeffect (or @sideeffect) in the documentation, which
+# will result in a user-defined paragraph with heading "Side Effects:".
+# You can put \n's in the value part of an alias to insert newlines.
+
+ALIASES =
+
+# Set the OPTIMIZE_OUTPUT_FOR_C tag to YES if your project consists of C
+# sources only. Doxygen will then generate output that is more tailored for C.
+# For instance, some of the names that are used will be different. The list
+# of all members will be omitted, etc.
+
+OPTIMIZE_OUTPUT_FOR_C = YES
+
+# Set the OPTIMIZE_OUTPUT_JAVA tag to YES if your project consists of Java
+# sources only. Doxygen will then generate output that is more tailored for
+# Java. For instance, namespaces will be presented as packages, qualified
+# scopes will look different, etc.
+
+OPTIMIZE_OUTPUT_JAVA = NO
+
+# Set the OPTIMIZE_FOR_FORTRAN tag to YES if your project consists of Fortran
+# sources only. Doxygen will then generate output that is more tailored for
+# Fortran.
+
+OPTIMIZE_FOR_FORTRAN = NO
+
+# Set the OPTIMIZE_OUTPUT_VHDL tag to YES if your project consists of VHDL
+# sources. Doxygen will then generate output that is tailored for
+# VHDL.
+
+OPTIMIZE_OUTPUT_VHDL = NO
+
+# Doxygen selects the parser to use depending on the extension of the files it
+# parses. With this tag you can assign which parser to use for a given extension.
+# Doxygen has a built-in mapping, but you can override or extend it using this
+# tag. The format is ext=language, where ext is a file extension, and language
+# is one of the parsers supported by doxygen: IDL, Java, Javascript, CSharp, C,
+# C++, D, PHP, Objective-C, Python, Fortran, VHDL, C, C++. For instance to make
+# doxygen treat .inc files as Fortran files (default is PHP), and .f files as C
+# (default is Fortran), use: inc=Fortran f=C. Note that for custom extensions
+# you also need to set FILE_PATTERNS otherwise the files are not read by doxygen.
+
+EXTENSION_MAPPING =
+
+# If you use STL classes (i.e. std::string, std::vector, etc.) but do not want
+# to include (a tag file for) the STL sources as input, then you should
+# set this tag to YES in order to let doxygen match functions declarations and
+# definitions whose arguments contain STL classes (e.g. func(std::string); v.s.
+# func(std::string) {}). This also makes the inheritance and collaboration
+# diagrams that involve STL classes more complete and accurate.
+
+BUILTIN_STL_SUPPORT = NO
+
+# If you use Microsoft's C++/CLI language, you should set this option to YES to
+# enable parsing support.
+
+CPP_CLI_SUPPORT = NO
+
+# Set the SIP_SUPPORT tag to YES if your project consists of sip sources only.
+# Doxygen will parse them like normal C++ but will assume all classes use public
+# instead of private inheritance when no explicit protection keyword is present.
+
+SIP_SUPPORT = NO
+
+# For Microsoft's IDL there are propget and propput attributes to indicate getter
+# and setter methods for a property. Setting this option to YES (the default)
+# will make doxygen replace the get and set methods by a property in the
+# documentation. This will only work if the methods are indeed getting or
+# setting a simple type. If this is not the case, or you want to show the
+# methods anyway, you should set this option to NO.
+
+IDL_PROPERTY_SUPPORT = YES
+
+# If member grouping is used in the documentation and the DISTRIBUTE_GROUP_DOC
+# tag is set to YES, then doxygen will reuse the documentation of the first
+# member in the group (if any) for the other members of the group. By default
+# all members of a group must be documented explicitly.
+
+DISTRIBUTE_GROUP_DOC = NO
+
+# Set the SUBGROUPING tag to YES (the default) to allow class member groups of
+# the same type (for instance a group of public functions) to be put as a
+# subgroup of that type (e.g. under the Public Functions section). Set it to
+# NO to prevent subgrouping. Alternatively, this can be done per class using
+# the \nosubgrouping command.
+
+SUBGROUPING = YES
+
+# When TYPEDEF_HIDES_STRUCT is enabled, a typedef of a struct, union, or enum
+# is documented as struct, union, or enum with the name of the typedef. So
+# typedef struct TypeS {} TypeT, will appear in the documentation as a struct
+# with name TypeT. When disabled the typedef will appear as a member of a file,
+# namespace, or class. And the struct will be named TypeS. This can typically
+# be useful for C code in case the coding convention dictates that all compound
+# types are typedef'ed and only the typedef is referenced, never the tag name.
+
+TYPEDEF_HIDES_STRUCT = NO
+
+# The SYMBOL_CACHE_SIZE determines the size of the internal cache use to
+# determine which symbols to keep in memory and which to flush to disk.
+# When the cache is full, less often used symbols will be written to disk.
+# For small to medium size projects (<1000 input files) the default value is
+# probably good enough. For larger projects a too small cache size can cause
+# doxygen to be busy swapping symbols to and from disk most of the time
+# causing a significant performance penalty.
+# If the system has enough physical memory increasing the cache will improve the
+# performance by keeping more symbols in memory. Note that the value works on
+# a logarithmic scale so increasing the size by one will roughly double the
+# memory usage. The cache size is given by this formula:
+# 2^(16+SYMBOL_CACHE_SIZE). The valid range is 0..9, the default is 0,
+# corresponding to a cache size of 2^16 = 65536 symbols
+
+SYMBOL_CACHE_SIZE = 0
+
+#---------------------------------------------------------------------------
+# Build related configuration options
+#---------------------------------------------------------------------------
+
+# If the EXTRACT_ALL tag is set to YES doxygen will assume all entities in
+# documentation are documented, even if no documentation was available.
+# Private class members and static file members will be hidden unless
+# the EXTRACT_PRIVATE and EXTRACT_STATIC tags are set to YES
+
+EXTRACT_ALL = NO
+
+# If the EXTRACT_PRIVATE tag is set to YES all private members of a class
+# will be included in the documentation.
+
+EXTRACT_PRIVATE = NO
+
+# If the EXTRACT_STATIC tag is set to YES all static members of a file
+# will be included in the documentation.
+
+EXTRACT_STATIC = NO
+
+# If the EXTRACT_LOCAL_CLASSES tag is set to YES classes (and structs)
+# defined locally in source files will be included in the documentation.
+# If set to NO only classes defined in header files are included.
+
+EXTRACT_LOCAL_CLASSES = NO
+
+# This flag is only useful for Objective-C code. When set to YES local
+# methods, which are defined in the implementation section but not in
+# the interface are included in the documentation.
+# If set to NO (the default) only methods in the interface are included.
+
+EXTRACT_LOCAL_METHODS = NO
+
+# If this flag is set to YES, the members of anonymous namespaces will be
+# extracted and appear in the documentation as a namespace called
+# 'anonymous_namespace{file}', where file will be replaced with the base
+# name of the file that contains the anonymous namespace. By default
+# anonymous namespaces are hidden.
+
+EXTRACT_ANON_NSPACES = NO
+
+# If the HIDE_UNDOC_MEMBERS tag is set to YES, Doxygen will hide all
+# undocumented members of documented classes, files or namespaces.
+# If set to NO (the default) these members will be included in the
+# various overviews, but no documentation section is generated.
+# This option has no effect if EXTRACT_ALL is enabled.
+
+HIDE_UNDOC_MEMBERS = YES
+
+# If the HIDE_UNDOC_CLASSES tag is set to YES, Doxygen will hide all
+# undocumented classes that are normally visible in the class hierarchy.
+# If set to NO (the default) these classes will be included in the various
+# overviews. This option has no effect if EXTRACT_ALL is enabled.
+
+HIDE_UNDOC_CLASSES = YES
+
+# If the HIDE_FRIEND_COMPOUNDS tag is set to YES, Doxygen will hide all
+# friend (class|struct|union) declarations.
+# If set to NO (the default) these declarations will be included in the
+# documentation.
+
+HIDE_FRIEND_COMPOUNDS = NO
+
+# If the HIDE_IN_BODY_DOCS tag is set to YES, Doxygen will hide any
+# documentation blocks found inside the body of a function.
+# If set to NO (the default) these blocks will be appended to the
+# function's detailed documentation block.
+
+HIDE_IN_BODY_DOCS = NO
+
+# The INTERNAL_DOCS tag determines if documentation
+# that is typed after a \internal command is included. If the tag is set
+# to NO (the default) then the documentation will be excluded.
+# Set it to YES to include the internal documentation.
+
+INTERNAL_DOCS = NO
+
+# If the CASE_SENSE_NAMES tag is set to NO then Doxygen will only generate
+# file names in lower-case letters. If set to YES upper-case letters are also
+# allowed. This is useful if you have classes or files whose names only differ
+# in case and if your file system supports case sensitive file names. Windows
+# and Mac users are advised to set this option to NO.
+
+CASE_SENSE_NAMES = YES
+
+# If the HIDE_SCOPE_NAMES tag is set to NO (the default) then Doxygen
+# will show members with their full class and namespace scopes in the
+# documentation. If set to YES the scope will be hidden.
+
+HIDE_SCOPE_NAMES = NO
+
+# If the SHOW_INCLUDE_FILES tag is set to YES (the default) then Doxygen
+# will put a list of the files that are included by a file in the documentation
+# of that file.
+
+SHOW_INCLUDE_FILES = YES
+
+# If the FORCE_LOCAL_INCLUDES tag is set to YES then Doxygen
+# will list include files with double quotes in the documentation
+# rather than with sharp brackets.
+
+FORCE_LOCAL_INCLUDES = NO
+
+# If the INLINE_INFO tag is set to YES (the default) then a tag [inline]
+# is inserted in the documentation for inline members.
+
+INLINE_INFO = YES
+
+# If the SORT_MEMBER_DOCS tag is set to YES (the default) then doxygen
+# will sort the (detailed) documentation of file and class members
+# alphabetically by member name. If set to NO the members will appear in
+# declaration order.
+
+SORT_MEMBER_DOCS = YES
+
+# If the SORT_BRIEF_DOCS tag is set to YES then doxygen will sort the
+# brief documentation of file, namespace and class members alphabetically
+# by member name. If set to NO (the default) the members will appear in
+# declaration order.
+
+SORT_BRIEF_DOCS = NO
+
+# If the SORT_MEMBERS_CTORS_1ST tag is set to YES then doxygen
+# will sort the (brief and detailed) documentation of class members so that
+# constructors and destructors are listed first. If set to NO (the default)
+# the constructors will appear in the respective orders defined by
+# SORT_MEMBER_DOCS and SORT_BRIEF_DOCS.
+# This tag will be ignored for brief docs if SORT_BRIEF_DOCS is set to NO
+# and ignored for detailed docs if SORT_MEMBER_DOCS is set to NO.
+
+SORT_MEMBERS_CTORS_1ST = NO
+
+# If the SORT_GROUP_NAMES tag is set to YES then doxygen will sort the
+# hierarchy of group names into alphabetical order. If set to NO (the default)
+# the group names will appear in their defined order.
+
+SORT_GROUP_NAMES = NO
+
+# If the SORT_BY_SCOPE_NAME tag is set to YES, the class list will be
+# sorted by fully-qualified names, including namespaces. If set to
+# NO (the default), the class list will be sorted only by class name,
+# not including the namespace part.
+# Note: This option is not very useful if HIDE_SCOPE_NAMES is set to YES.
+# Note: This option applies only to the class list, not to the
+# alphabetical list.
+
+SORT_BY_SCOPE_NAME = NO
+
+# If the STRICT_PROTO_MATCHING option is enabled and doxygen fails to do
+# proper type resolution of all parameters of a function it will reject a
+# match between the prototype and the implementation of a member function
+# even if there is only one candidate or it is obvious which candidate to
+# choose by doing a simple string match. By disabling STRICT_PROTO_MATCHING
+# doxygen will still accept a match between prototype and implementation in
+# such cases.
+
+STRICT_PROTO_MATCHING = NO
+
+# The GENERATE_TODOLIST tag can be used to enable (YES) or
+# disable (NO) the todo list. This list is created by putting \todo
+# commands in the documentation.
+
+GENERATE_TODOLIST = YES
+
+# The GENERATE_TESTLIST tag can be used to enable (YES) or
+# disable (NO) the test list. This list is created by putting \test
+# commands in the documentation.
+
+GENERATE_TESTLIST = YES
+
+# The GENERATE_BUGLIST tag can be used to enable (YES) or
+# disable (NO) the bug list. This list is created by putting \bug
+# commands in the documentation.
+
+GENERATE_BUGLIST = YES
+
+# The GENERATE_DEPRECATEDLIST tag can be used to enable (YES) or
+# disable (NO) the deprecated list. This list is created by putting
+# \deprecated commands in the documentation.
+
+GENERATE_DEPRECATEDLIST= YES
+
+# The ENABLED_SECTIONS tag can be used to enable conditional
+# documentation sections, marked by \if sectionname ... \endif.
+
+ENABLED_SECTIONS =
+
+# The MAX_INITIALIZER_LINES tag determines the maximum number of lines
+# the initial value of a variable or macro consists of for it to appear in
+# the documentation. If the initializer consists of more lines than specified
+# here it will be hidden. Use a value of 0 to hide initializers completely.
+# The appearance of the initializer of individual variables and macros in the
+# documentation can be controlled using \showinitializer or \hideinitializer
+# command in the documentation regardless of this setting.
+
+MAX_INITIALIZER_LINES = 30
+
+# Set the SHOW_USED_FILES tag to NO to disable the list of files generated
+# at the bottom of the documentation of classes and structs. If set to YES the
+# list will mention the files that were used to generate the documentation.
+
+SHOW_USED_FILES = YES
+
+# If the sources in your project are distributed over multiple directories
+# then setting the SHOW_DIRECTORIES tag to YES will show the directory hierarchy
+# in the documentation. The default is NO.
+
+SHOW_DIRECTORIES = NO
+
+# Set the SHOW_FILES tag to NO to disable the generation of the Files page.
+# This will remove the Files entry from the Quick Index and from the
+# Folder Tree View (if specified). The default is YES.
+
+SHOW_FILES = YES
+
+# Set the SHOW_NAMESPACES tag to NO to disable the generation of the
+# Namespaces page.
+# This will remove the Namespaces entry from the Quick Index
+# and from the Folder Tree View (if specified). The default is YES.
+
+SHOW_NAMESPACES = YES
+
+# The FILE_VERSION_FILTER tag can be used to specify a program or script that
+# doxygen should invoke to get the current version for each file (typically from
+# the version control system). Doxygen will invoke the program by executing (via
+# popen()) the command <command> <input-file>, where <command> is the value of
+# the FILE_VERSION_FILTER tag, and <input-file> is the name of an input file
+# provided by doxygen. Whatever the program writes to standard output
+# is used as the file version. See the manual for examples.
+
+FILE_VERSION_FILTER =
+
+# The LAYOUT_FILE tag can be used to specify a layout file which will be parsed
+# by doxygen. The layout file controls the global structure of the generated
+# output files in an output format independent way. To create the layout file
+# that represents doxygen's defaults, run doxygen with the -l option.
+# You can optionally specify a file name after the option, if omitted
+# DoxygenLayout.xml will be used as the name of the layout file.
+
+LAYOUT_FILE =
+
+#---------------------------------------------------------------------------
+# configuration options related to warning and progress messages
+#---------------------------------------------------------------------------
+
+# The QUIET tag can be used to turn on/off the messages that are generated
+# by doxygen. Possible values are YES and NO. If left blank NO is used.
+
+QUIET = NO
+
+# The WARNINGS tag can be used to turn on/off the warning messages that are
+# generated by doxygen. Possible values are YES and NO. If left blank
+# NO is used.
+
+WARNINGS = YES
+
+# If WARN_IF_UNDOCUMENTED is set to YES, then doxygen will generate warnings
+# for undocumented members. If EXTRACT_ALL is set to YES then this flag will
+# automatically be disabled.
+
+WARN_IF_UNDOCUMENTED = YES
+
+# If WARN_IF_DOC_ERROR is set to YES, doxygen will generate warnings for
+# potential errors in the documentation, such as not documenting some
+# parameters in a documented function, or documenting parameters that
+# don't exist or using markup commands wrongly.
+
+WARN_IF_DOC_ERROR = YES
+
+# The WARN_NO_PARAMDOC option can be enabled to get warnings for
+# functions that are documented, but have no documentation for their parameters
+# or return value. If set to NO (the default) doxygen will only warn about
+# wrong or incomplete parameter documentation, but not about the absence of
+# documentation.
+
+WARN_NO_PARAMDOC = NO
+
+# The WARN_FORMAT tag determines the format of the warning messages that
+# doxygen can produce. The string should contain the $file, $line, and $text
+# tags, which will be replaced by the file and line number from which the
+# warning originated and the warning text. Optionally the format may contain
+# $version, which will be replaced by the version of the file (if it could
+# be obtained via FILE_VERSION_FILTER)
+
+WARN_FORMAT = "$file:$line: $text"
+
+# The WARN_LOGFILE tag can be used to specify a file to which warning
+# and error messages should be written. If left blank the output is written
+# to stderr.
+
+WARN_LOGFILE =
+
+#---------------------------------------------------------------------------
+# configuration options related to the input files
+#---------------------------------------------------------------------------
+
+# The INPUT tag can be used to specify the files and/or directories that contain
+# documented source files. You may enter file names like "myfile.cpp" or
+# directories like "/usr/src/myproject". Separate the files or directories
+# with spaces.
+
+INPUT = include \
+ docs
+
+# This tag can be used to specify the character encoding of the source files
+# that doxygen parses. Internally doxygen uses the UTF-8 encoding, which is
+# also the default input encoding. Doxygen uses libiconv (or the iconv built
+# into libc) for the transcoding. See http://www.gnu.org/software/libiconv for
+# the list of possible encodings.
+
+INPUT_ENCODING = UTF-8
+
+# If the value of the INPUT tag contains directories, you can use the
+# FILE_PATTERNS tag to specify one or more wildcard pattern (like *.cpp
+# and *.h) to filter out the source-files in the directories. If left
+# blank the following patterns are tested:
+# *.c *.cc *.cxx *.cpp *.c++ *.d *.java *.ii *.ixx *.ipp *.i++ *.inl *.h *.hh
+# *.hxx *.hpp *.h++ *.idl *.odl *.cs *.php *.php3 *.inc *.m *.mm *.dox *.py
+# *.f90 *.f *.for *.vhd *.vhdl
+
+FILE_PATTERNS = *.cpp \
+ *.cc \
+ *.c \
+ *.h \
+ *.hh \
+ *.hpp \
+ *.dox
+
+# The RECURSIVE tag can be used to specify whether or not subdirectories
+# should be searched for input files as well. Possible values are YES and NO.
+# If left blank NO is used.
+
+RECURSIVE = NO
+
+# The EXCLUDE tag can be used to specify files and/or directories that should
+# be excluded from the INPUT source files. This way you can easily exclude a
+# subdirectory from a directory tree whose root is specified with the INPUT tag.
+
+EXCLUDE =
+
+# The EXCLUDE_SYMLINKS tag can be used to select whether or not files or
+# directories that are symbolic links (a Unix file system feature) are excluded
+# from the input.
+
+EXCLUDE_SYMLINKS = NO
+
+# If the value of the INPUT tag contains directories, you can use the
+# EXCLUDE_PATTERNS tag to specify one or more wildcard patterns to exclude
+# certain files from those directories. Note that the wildcards are matched
+# against the file with absolute path, so to exclude all test directories
+# for example use the pattern */test/*
+
+EXCLUDE_PATTERNS = */.git/*
+
+# The EXCLUDE_SYMBOLS tag can be used to specify one or more symbol names
+# (namespaces, classes, functions, etc.) that should be excluded from the
+# output. The symbol name can be a fully qualified name, a word, or if the
+# wildcard * is used, a substring. Examples: ANamespace, AClass,
+# AClass::ANamespace, ANamespace::*Test
+
+EXCLUDE_SYMBOLS =
+
+# The EXAMPLE_PATH tag can be used to specify one or more files or
+# directories that contain example code fragments that are included (see
+# the \include command).
+
+EXAMPLE_PATH =
+
+# If the value of the EXAMPLE_PATH tag contains directories, you can use the
+# EXAMPLE_PATTERNS tag to specify one or more wildcard pattern (like *.cpp
+# and *.h) to filter out the source-files in the directories. If left
+# blank all files are included.
+
+EXAMPLE_PATTERNS =
+
+# If the EXAMPLE_RECURSIVE tag is set to YES then subdirectories will be
+# searched for input files to be used with the \include or \dontinclude
+# commands irrespective of the value of the RECURSIVE tag.
+# Possible values are YES and NO. If left blank NO is used.
+
+EXAMPLE_RECURSIVE = NO
+
+# The IMAGE_PATH tag can be used to specify one or more files or
+# directories that contain image that are included in the documentation (see
+# the \image command).
+
+IMAGE_PATH =
+
+# The INPUT_FILTER tag can be used to specify a program that doxygen should
+# invoke to filter for each input file. Doxygen will invoke the filter program
+# by executing (via popen()) the command <filter> <input-file>, where <filter>
+# is the value of the INPUT_FILTER tag, and <input-file> is the name of an
+# input file. Doxygen will then use the output that the filter program writes
+# to standard output.
+# If FILTER_PATTERNS is specified, this tag will be
+# ignored.
+
+INPUT_FILTER =
+
+# The FILTER_PATTERNS tag can be used to specify filters on a per file pattern
+# basis.
+# Doxygen will compare the file name with each pattern and apply the
+# filter if there is a match.
+# The filters are a list of the form:
+# pattern=filter (like *.cpp=my_cpp_filter). See INPUT_FILTER for further
+# info on how filters are used. If FILTER_PATTERNS is empty or if
+# none of the patterns match the file name, INPUT_FILTER is applied.
+
+FILTER_PATTERNS =
+
+# If the FILTER_SOURCE_FILES tag is set to YES, the input filter (if set using
+# INPUT_FILTER) will be used to filter the input files when producing source
+# files to browse (i.e. when SOURCE_BROWSER is set to YES).
+
+FILTER_SOURCE_FILES = NO
+
+# The FILTER_SOURCE_PATTERNS tag can be used to specify source filters per file
+# pattern. A pattern will override the setting for FILTER_PATTERN (if any)
+# and it is also possible to disable source filtering for a specific pattern
+# using *.ext= (so without naming a filter). This option only has effect when
+# FILTER_SOURCE_FILES is enabled.
+
+FILTER_SOURCE_PATTERNS =
+
+#---------------------------------------------------------------------------
+# configuration options related to source browsing
+#---------------------------------------------------------------------------
+
+# If the SOURCE_BROWSER tag is set to YES then a list of source files will
+# be generated. Documented entities will be cross-referenced with these sources.
+# Note: To get rid of all source code in the generated output, make sure also
+# VERBATIM_HEADERS is set to NO.
+
+SOURCE_BROWSER = NO
+
+# Setting the INLINE_SOURCES tag to YES will include the body
+# of functions and classes directly in the documentation.
+
+INLINE_SOURCES = NO
+
+# Setting the STRIP_CODE_COMMENTS tag to YES (the default) will instruct
+# doxygen to hide any special comment blocks from generated source code
+# fragments. Normal C and C++ comments will always remain visible.
+
+STRIP_CODE_COMMENTS = YES
+
+# If the REFERENCED_BY_RELATION tag is set to YES
+# then for each documented function all documented
+# functions referencing it will be listed.
+
+REFERENCED_BY_RELATION = NO
+
+# If the REFERENCES_RELATION tag is set to YES
+# then for each documented function all documented entities
+# called/used by that function will be listed.
+
+REFERENCES_RELATION = NO
+
+# If the REFERENCES_LINK_SOURCE tag is set to YES (the default)
+# and SOURCE_BROWSER tag is set to YES, then the hyperlinks from
+# functions in REFERENCES_RELATION and REFERENCED_BY_RELATION lists will
+# link to the source code.
+# Otherwise they will link to the documentation.
+
+REFERENCES_LINK_SOURCE = YES
+
+# If the USE_HTAGS tag is set to YES then the references to source code
+# will point to the HTML generated by the htags(1) tool instead of doxygen's
+# built-in source browser. The htags tool is part of GNU's global source
+# tagging system (see http://www.gnu.org/software/global/global.html). You
+# will need version 4.8.6 or higher.
+
+USE_HTAGS = NO
+
+# If the VERBATIM_HEADERS tag is set to YES (the default) then Doxygen
+# will generate a verbatim copy of the header file for each class for
+# which an include is specified. Set to NO to disable this.
+
+VERBATIM_HEADERS = YES
+
+#---------------------------------------------------------------------------
+# configuration options related to the alphabetical class index
+#---------------------------------------------------------------------------
+
+# If the ALPHABETICAL_INDEX tag is set to YES, an alphabetical index
+# of all compounds will be generated. Enable this if the project
+# contains a lot of classes, structs, unions or interfaces.
+
+ALPHABETICAL_INDEX = NO
+
+# If the alphabetical index is enabled (see ALPHABETICAL_INDEX) then
+# the COLS_IN_ALPHA_INDEX tag can be used to specify the number of columns
+# in which this list will be split (can be a number in the range [1..20])
+
+COLS_IN_ALPHA_INDEX = 5
+
+# In case all classes in a project start with a common prefix, all
+# classes will be put under the same header in the alphabetical index.
+# The IGNORE_PREFIX tag can be used to specify one or more prefixes that
+# should be ignored while generating the index headers.
+
+IGNORE_PREFIX =
+
+#---------------------------------------------------------------------------
+# configuration options related to the HTML output
+#---------------------------------------------------------------------------
+
+# If the GENERATE_HTML tag is set to YES (the default) Doxygen will
+# generate HTML output.
+
+GENERATE_HTML = YES
+
+# The HTML_OUTPUT tag is used to specify where the HTML docs will be put.
+# If a relative path is entered the value of OUTPUT_DIRECTORY will be
+# put in front of it. If left blank `html' will be used as the default path.
+
+HTML_OUTPUT = html
+
+# The HTML_FILE_EXTENSION tag can be used to specify the file extension for
+# each generated HTML page (for example: .htm,.php,.asp). If it is left blank
+# doxygen will generate files with .html extension.
+
+HTML_FILE_EXTENSION = .html
+
+# The HTML_HEADER tag can be used to specify a personal HTML header for
+# each generated HTML page. If it is left blank doxygen will generate a
+# standard header.
+
+HTML_HEADER =
+
+# The HTML_FOOTER tag can be used to specify a personal HTML footer for
+# each generated HTML page. If it is left blank doxygen will generate a
+# standard footer.
+
+HTML_FOOTER =
+
+# The HTML_STYLESHEET tag can be used to specify a user-defined cascading
+# style sheet that is used by each HTML page. It can be used to
+# fine-tune the look of the HTML output. If the tag is left blank doxygen
+# will generate a default style sheet. Note that doxygen will try to copy
+# the style sheet file to the HTML output directory, so don't put your own
+# stylesheet in the HTML output directory as well, or it will be erased!
+
+HTML_STYLESHEET =
+
+# The HTML_COLORSTYLE_HUE tag controls the color of the HTML output.
+# Doxygen will adjust the colors in the stylesheet and background images
+# according to this color. Hue is specified as an angle on a colorwheel,
+# see http://en.wikipedia.org/wiki/Hue for more information.
+# For instance the value 0 represents red, 60 is yellow, 120 is green,
+# 180 is cyan, 240 is blue, 300 is purple, and 360 is red again.
+# The allowed range is 0 to 359.
+
+HTML_COLORSTYLE_HUE = 220
+
+# The HTML_COLORSTYLE_SAT tag controls the purity (or saturation) of
+# the colors in the HTML output. For a value of 0 the output will use
+# grayscales only. A value of 255 will produce the most vivid colors.
+
+HTML_COLORSTYLE_SAT = 100
+
+# The HTML_COLORSTYLE_GAMMA tag controls the gamma correction applied to
+# the luminance component of the colors in the HTML output. Values below
+# 100 gradually make the output lighter, whereas values above 100 make
+# the output darker. The value divided by 100 is the actual gamma applied,
+# so 80 represents a gamma of 0.8, the value 220 represents a gamma of 2.2,
+# and 100 does not change the gamma.
+
+HTML_COLORSTYLE_GAMMA = 80
+
+# If the HTML_TIMESTAMP tag is set to YES then the footer of each generated HTML
+# page will contain the date and time when the page was generated. Setting
+# this to NO can help when comparing the output of multiple runs.
+
+HTML_TIMESTAMP = NO
+
+# If the HTML_ALIGN_MEMBERS tag is set to YES, the members of classes,
+# files or namespaces will be aligned in HTML using tables. If set to
+# NO a bullet list will be used.
+
+HTML_ALIGN_MEMBERS = YES
+
+# If the HTML_DYNAMIC_SECTIONS tag is set to YES then the generated HTML
+# documentation will contain sections that can be hidden and shown after the
+# page has loaded. For this to work a browser that supports
+# JavaScript and DHTML is required (for instance Mozilla 1.0+, Firefox,
+# Netscape 6.0+, Internet Explorer 5.0+, Konqueror, or Safari).
+
+HTML_DYNAMIC_SECTIONS = NO
+
+# If the GENERATE_DOCSET tag is set to YES, additional index files
+# will be generated that can be used as input for Apple's Xcode 3
+# integrated development environment, introduced with OSX 10.5 (Leopard).
+# To create a documentation set, doxygen will generate a Makefile in the
+# HTML output directory. Running make will produce the docset in that
+# directory and running "make install" will install the docset in
+# ~/Library/Developer/Shared/Documentation/DocSets so that Xcode will find
+# it at startup.
+# See http://developer.apple.com/tools/creatingdocsetswithdoxygen.html
+# for more information.
+
+GENERATE_DOCSET = NO
+
+# When the GENERATE_DOCSET tag is set to YES, this tag determines the name
+# of the feed. A documentation feed provides an umbrella under which multiple
+# documentation sets from a single provider (such as a company or product suite)
+# can be grouped.
+
+DOCSET_FEEDNAME = "Doxygen generated docs"
+
+# When the GENERATE_DOCSET tag is set to YES, this tag specifies a string that
+# should uniquely identify the documentation set bundle. This should be a
+# reverse domain-name style string, e.g. com.mycompany.MyDocSet. Doxygen
+# will append .docset to the name.
+
+DOCSET_BUNDLE_ID = org.doxygen.Project
+
+# When the GENERATE_DOCSET tag is set to YES, the DOCSET_PUBLISHER_ID tag
+# specifies a string that should uniquely identify
+# the documentation publisher. This should be a reverse domain-name style
+# string, e.g. com.mycompany.MyDocSet.documentation.
+
+DOCSET_PUBLISHER_ID = org.doxygen.Publisher
+
+# The DOCSET_PUBLISHER_NAME tag identifies the documentation publisher.
+
+DOCSET_PUBLISHER_NAME = Publisher
+
+# If the GENERATE_HTMLHELP tag is set to YES, additional index files
+# will be generated that can be used as input for tools like the
+# Microsoft HTML help workshop to generate a compiled HTML help file (.chm)
+# of the generated HTML documentation.
+
+GENERATE_HTMLHELP = NO
+
+# If the GENERATE_HTMLHELP tag is set to YES, the CHM_FILE tag can
+# be used to specify the file name of the resulting .chm file. You
+# can add a path in front of the file if the result should not be
+# written to the html output directory.
+
+CHM_FILE =
+
+# If the GENERATE_HTMLHELP tag is set to YES, the HHC_LOCATION tag can
+# be used to specify the location (absolute path including file name) of
+# the HTML help compiler (hhc.exe). If non-empty doxygen will try to run
+# the HTML help compiler on the generated index.hhp.
+
+HHC_LOCATION =
+
+# If the GENERATE_HTMLHELP tag is set to YES, the GENERATE_CHI flag
+# controls whether a separate .chi index file is generated (YES) or whether
+# it should be included in the master .chm file (NO).
+
+GENERATE_CHI = NO
+
+# If the GENERATE_HTMLHELP tag is set to YES, the CHM_INDEX_ENCODING
+# is used to encode HtmlHelp index (hhk), content (hhc) and project file
+# content.
+
+CHM_INDEX_ENCODING =
+
+# If the GENERATE_HTMLHELP tag is set to YES, the BINARY_TOC flag
+# controls whether a binary table of contents is generated (YES) or a
+# normal table of contents (NO) in the .chm file.
+
+BINARY_TOC = NO
+
+# The TOC_EXPAND flag can be set to YES to add extra items for group members
+# to the contents of the HTML help documentation and to the tree view.
+
+TOC_EXPAND = NO
+
+# If the GENERATE_QHP tag is set to YES and both QHP_NAMESPACE and
+# QHP_VIRTUAL_FOLDER are set, an additional index file will be generated
+# that can be used as input for Qt's qhelpgenerator to generate a
+# Qt Compressed Help (.qch) of the generated HTML documentation.
+
+GENERATE_QHP = NO
+
+# If the QHG_LOCATION tag is specified, the QCH_FILE tag can
+# be used to specify the file name of the resulting .qch file.
+# The path specified is relative to the HTML output folder.
+
+QCH_FILE =
+
+# The QHP_NAMESPACE tag specifies the namespace to use when generating
+# Qt Help Project output. For more information please see
+# http://doc.trolltech.com/qthelpproject.html#namespace
+
+QHP_NAMESPACE =
+
+# The QHP_VIRTUAL_FOLDER tag specifies the virtual folder to use when
+# generating Qt Help Project output. For more information please see
+# http://doc.trolltech.com/qthelpproject.html#virtual-folders
+
+QHP_VIRTUAL_FOLDER = doc
+
+# If QHP_CUST_FILTER_NAME is set, it specifies the name of a custom filter to
+# add. For more information please see
+# http://doc.trolltech.com/qthelpproject.html#custom-filters
+
+QHP_CUST_FILTER_NAME =
+
+# The QHP_CUST_FILTER_ATTRS tag specifies the list of the attributes of the
+# custom filter to add. For more information please see
+# <a href="http://doc.trolltech.com/qthelpproject.html#custom-filters">
+# Qt Help Project / Custom Filters</a>.
+
+QHP_CUST_FILTER_ATTRS =
+
+# The QHP_SECT_FILTER_ATTRS tag specifies the list of the attributes this
+# project's filter section matches. For more information please see
+# <a href="http://doc.trolltech.com/qthelpproject.html#filter-attributes">
+# Qt Help Project / Filter Attributes</a>.
+
+QHP_SECT_FILTER_ATTRS =
+
+# If the GENERATE_QHP tag is set to YES, the QHG_LOCATION tag can
+# be used to specify the location of Qt's qhelpgenerator.
+# If non-empty doxygen will try to run qhelpgenerator on the generated
+# .qhp file.
+
+QHG_LOCATION =
+
+# If the GENERATE_ECLIPSEHELP tag is set to YES, additional index files
+# will be generated, which together with the HTML files, form an Eclipse help
+# plugin. To install this plugin and make it available under the help contents
+# menu in Eclipse, the contents of the directory containing the HTML and XML
+# files needs to be copied into the plugins directory of Eclipse. The name of
+# the directory within the plugins directory should be the same as
+# the ECLIPSE_DOC_ID value. After copying, Eclipse needs to be restarted before
+# the help appears.
+
+GENERATE_ECLIPSEHELP = NO
+
+# A unique identifier for the Eclipse help plugin. When installing the plugin
+# the directory name containing the HTML and XML files should also have
+# this name.
+
+ECLIPSE_DOC_ID = org.doxygen.Project
+
+# The DISABLE_INDEX tag can be used to turn on/off the condensed index at
+# the top of each HTML page. The value NO (the default) enables the index and
+# the value YES disables it.
+
+DISABLE_INDEX = NO
+
+# This tag can be used to set the number of enum values (range [0..20])
+# that doxygen will group on one line in the generated HTML documentation.
+# Note that a value of 0 will completely suppress the enum values from
+# appearing in the overview section.
+
+ENUM_VALUES_PER_LINE = 4
+
+# The GENERATE_TREEVIEW tag is used to specify whether a tree-like index
+# structure should be generated to display hierarchical information.
+# If the tag value is set to YES, a side panel will be generated
+# containing a tree-like index structure (just like the one that
+# is generated for HTML Help). For this to work a browser that supports
+# JavaScript, DHTML, CSS and frames is required (i.e. any modern browser).
+# Windows users are probably better off using the HTML help feature.
+
+GENERATE_TREEVIEW = NONE
+
+# By enabling USE_INLINE_TREES, doxygen will generate the Groups, Directories,
+# and Class Hierarchy pages using a tree view instead of an ordered list.
+
+USE_INLINE_TREES = NO
+
+# If the treeview is enabled (see GENERATE_TREEVIEW) then this tag can be
+# used to set the initial width (in pixels) of the frame in which the tree
+# is shown.
+
+TREEVIEW_WIDTH = 250
+
+# When the EXT_LINKS_IN_WINDOW option is set to YES doxygen will open
+# links to external symbols imported via tag files in a separate window.
+
+EXT_LINKS_IN_WINDOW = NO
+
+# Use this tag to change the font size of LaTeX formulas included
+# as images in the HTML documentation. The default is 10. Note that
+# when you change the font size after a successful doxygen run you need
+# to manually remove any form_*.png images from the HTML output directory
+# to force them to be regenerated.
+
+FORMULA_FONTSIZE = 10
+
+# Use the FORMULA_TRANSPARENT tag to determine whether or not the images
+# generated for formulas are transparent PNGs. Transparent PNGs are
+# not supported properly for IE 6.0, but are supported on all modern browsers.
+# Note that when changing this option you need to delete any form_*.png files
+# in the HTML output before the changes take effect.
+
+FORMULA_TRANSPARENT = YES
+
+# Enable the USE_MATHJAX option to render LaTeX formulas using MathJax
+# (see http://www.mathjax.org) which uses client side Javascript for the
+# rendering instead of using prerendered bitmaps. Use this if you do not
+# have LaTeX installed or if you want the formulas to look prettier in the HTML
+# output. When enabled you also need to install MathJax separately and
+# configure the path to it using the MATHJAX_RELPATH option.
+
+USE_MATHJAX = NO
+
+# When MathJax is enabled you need to specify the location relative to the
+# HTML output directory using the MATHJAX_RELPATH option. The destination
+# directory should contain the MathJax.js script. For instance, if the mathjax
+# directory is located at the same level as the HTML output directory, then
+# MATHJAX_RELPATH should be ../mathjax. The default value points to the
+# mathjax.org site, so you can quickly see the result without installing
+# MathJax, but it is strongly recommended to install a local copy of MathJax
+# before deployment.
+
+MATHJAX_RELPATH = http://www.mathjax.org/mathjax
+
+# When the SEARCHENGINE tag is enabled doxygen will generate a search box
+# for the HTML output. The underlying search engine uses javascript
+# and DHTML and should work on any modern browser. Note that when using
+# HTML help (GENERATE_HTMLHELP), Qt help (GENERATE_QHP), or docsets
+# (GENERATE_DOCSET) there is already a search function so this one should
+# typically be disabled. For large projects the javascript based search engine
+# can be slow; in that case enabling SERVER_BASED_SEARCH may provide a better
+# solution.
+
+SEARCHENGINE = NO
+
+# When the SERVER_BASED_SEARCH tag is enabled the search engine will be
+# implemented using a PHP enabled web server instead of at the web client
+# using Javascript. Doxygen will generate the search PHP script and index
+# file to put on the web server. The advantage of the server
+# based approach is that it scales better to large projects and allows
+# full text search. The disadvantages are that it is more difficult to set up
+# and does not have live searching capabilities.
+
+SERVER_BASED_SEARCH = NO
+
+#---------------------------------------------------------------------------
+# configuration options related to the LaTeX output
+#---------------------------------------------------------------------------
+
+# If the GENERATE_LATEX tag is set to YES (the default) Doxygen will
+# generate LaTeX output.
+
+GENERATE_LATEX = YES
+
+# The LATEX_OUTPUT tag is used to specify where the LaTeX docs will be put.
+# If a relative path is entered the value of OUTPUT_DIRECTORY will be
+# put in front of it. If left blank `latex' will be used as the default path.
+
+LATEX_OUTPUT = latex
+
+# The LATEX_CMD_NAME tag can be used to specify the LaTeX command name to be
+# invoked. If left blank `latex' will be used as the default command name.
+# Note that when enabling USE_PDFLATEX this option is only used for
+# generating bitmaps for formulas in the HTML output, but not in the
+# Makefile that is written to the output directory.
+
+LATEX_CMD_NAME = latex
+
+# The MAKEINDEX_CMD_NAME tag can be used to specify the command name to
+# generate an index for LaTeX. If left blank `makeindex' will be used as the
+# default command name.
+
+MAKEINDEX_CMD_NAME = makeindex
+
+# If the COMPACT_LATEX tag is set to YES Doxygen generates more compact
+# LaTeX documents. This may be useful for small projects and may help to
+# save some trees in general.
+
+COMPACT_LATEX = NO
+
+# The PAPER_TYPE tag can be used to set the paper type that is used
+# by the printer. Possible values are: a4, letter, legal and
+# executive. If left blank a4wide will be used.
+
+PAPER_TYPE = a4wide
+
+# The EXTRA_PACKAGES tag can be used to specify one or more names of LaTeX
+# packages that should be included in the LaTeX output.
+
+EXTRA_PACKAGES =
+
+# The LATEX_HEADER tag can be used to specify a personal LaTeX header for
+# the generated latex document. The header should contain everything until
+# the first chapter. If it is left blank doxygen will generate a
+# standard header. Notice: only use this tag if you know what you are doing!
+
+LATEX_HEADER =
+
+# If the PDF_HYPERLINKS tag is set to YES, the LaTeX that is generated
+# is prepared for conversion to pdf (using ps2pdf). The pdf file will
+# contain links (just like the HTML output) instead of page references.
+# This makes the output suitable for online browsing using a pdf viewer.
+
+PDF_HYPERLINKS = YES
+
+# If the USE_PDFLATEX tag is set to YES, pdflatex will be used instead of
+# plain latex in the generated Makefile. Set this option to YES to get a
+# higher quality PDF documentation.
+
+USE_PDFLATEX = YES
+
+# If the LATEX_BATCHMODE tag is set to YES, doxygen will add the \\batchmode
+# command to the generated LaTeX files. This will instruct LaTeX to keep
+# running if errors occur, instead of asking the user for help.
+# This option is also used when generating formulas in HTML.
+
+LATEX_BATCHMODE = NO
+
+# If LATEX_HIDE_INDICES is set to YES then doxygen will not
+# include the index chapters (such as File Index, Compound Index, etc.)
+# in the output.
+
+LATEX_HIDE_INDICES = NO
+
+# If LATEX_SOURCE_CODE is set to YES then doxygen will include
+# source code with syntax highlighting in the LaTeX output.
+# Note that which sources are shown also depends on other settings
+# such as SOURCE_BROWSER.
+
+LATEX_SOURCE_CODE = NO
+
+#---------------------------------------------------------------------------
+# configuration options related to the RTF output
+#---------------------------------------------------------------------------
+
+# If the GENERATE_RTF tag is set to YES Doxygen will generate RTF output
+# The RTF output is optimized for Word 97 and may not look very pretty with
+# other RTF readers or editors.
+
+GENERATE_RTF = NO
+
+# The RTF_OUTPUT tag is used to specify where the RTF docs will be put.
+# If a relative path is entered the value of OUTPUT_DIRECTORY will be
+# put in front of it. If left blank `rtf' will be used as the default path.
+
+RTF_OUTPUT = rtf
+
+# If the COMPACT_RTF tag is set to YES Doxygen generates more compact
+# RTF documents. This may be useful for small projects and may help to
+# save some trees in general.
+
+COMPACT_RTF = NO
+
+# If the RTF_HYPERLINKS tag is set to YES, the RTF that is generated
+# will contain hyperlink fields. The RTF file will
+# contain links (just like the HTML output) instead of page references.
+# This makes the output suitable for online browsing using WORD or other
+# programs which support those fields.
+# Note: wordpad (write) and others do not support links.
+
+RTF_HYPERLINKS = NO
+
+# Load stylesheet definitions from file. Syntax is similar to doxygen's
+# config file, i.e. a series of assignments. You only have to provide
+# replacements; missing definitions are set to their default value.
+
+RTF_STYLESHEET_FILE =
+
+# Set optional variables used in the generation of an rtf document.
+# Syntax is similar to doxygen's config file.
+
+RTF_EXTENSIONS_FILE =
+
+#---------------------------------------------------------------------------
+# configuration options related to the man page output
+#---------------------------------------------------------------------------
+
+# If the GENERATE_MAN tag is set to YES (the default) Doxygen will
+# generate man pages.
+
+GENERATE_MAN = YES
+
+# The MAN_OUTPUT tag is used to specify where the man pages will be put.
+# If a relative path is entered the value of OUTPUT_DIRECTORY will be
+# put in front of it. If left blank `man' will be used as the default path.
+
+MAN_OUTPUT = man
+
+# The MAN_EXTENSION tag determines the extension that is added to
+# the generated man pages (default is the subroutine's section .3)
+
+MAN_EXTENSION = .3
+
+# If the MAN_LINKS tag is set to YES and Doxygen generates man output,
+# then it will generate one additional man file for each entity
+# documented in the real man page(s). These additional files
+# only source the real man page, but without them the man command
+# would be unable to find the correct page. The default is NO.
+
+MAN_LINKS = NO
+
+#---------------------------------------------------------------------------
+# configuration options related to the XML output
+#---------------------------------------------------------------------------
+
+# If the GENERATE_XML tag is set to YES Doxygen will
+# generate an XML file that captures the structure of
+# the code including all documentation.
+
+GENERATE_XML = NO
+
+# The XML_OUTPUT tag is used to specify where the XML pages will be put.
+# If a relative path is entered the value of OUTPUT_DIRECTORY will be
+# put in front of it. If left blank `xml' will be used as the default path.
+
+XML_OUTPUT = xml
+
+# The XML_SCHEMA tag can be used to specify an XML schema,
+# which can be used by a validating XML parser to check the
+# syntax of the XML files.
+
+XML_SCHEMA =
+
+# The XML_DTD tag can be used to specify an XML DTD,
+# which can be used by a validating XML parser to check the
+# syntax of the XML files.
+
+XML_DTD =
+
+# If the XML_PROGRAMLISTING tag is set to YES Doxygen will
+# dump the program listings (including syntax highlighting
+# and cross-referencing information) to the XML output. Note that
+# enabling this will significantly increase the size of the XML output.
+
+XML_PROGRAMLISTING = YES
+
+#---------------------------------------------------------------------------
+# configuration options for the AutoGen Definitions output
+#---------------------------------------------------------------------------
+
+# If the GENERATE_AUTOGEN_DEF tag is set to YES Doxygen will
+# generate an AutoGen Definitions (see autogen.sf.net) file
+# that captures the structure of the code including all
+# documentation. Note that this feature is still experimental
+# and incomplete at the moment.
+
+GENERATE_AUTOGEN_DEF = NO
+
+#---------------------------------------------------------------------------
+# configuration options related to the Perl module output
+#---------------------------------------------------------------------------
+
+# If the GENERATE_PERLMOD tag is set to YES Doxygen will
+# generate a Perl module file that captures the structure of
+# the code including all documentation. Note that this
+# feature is still experimental and incomplete at the
+# moment.
+
+GENERATE_PERLMOD = NO
+
+# If the PERLMOD_LATEX tag is set to YES Doxygen will generate
+# the necessary Makefile rules, Perl scripts and LaTeX code to be able
+# to generate PDF and DVI output from the Perl module output.
+
+PERLMOD_LATEX = NO
+
+# If the PERLMOD_PRETTY tag is set to YES the Perl module output will be
+# nicely formatted so it can be parsed by a human reader. This is useful
+# if you want to understand what is going on. On the other hand, if this
+# tag is set to NO the size of the Perl module output will be much smaller
+# and Perl will parse it just the same.
+
+PERLMOD_PRETTY = YES
+
+# The names of the make variables in the generated doxyrules.make file
+# are prefixed with the string contained in PERLMOD_MAKEVAR_PREFIX.
+# This is useful so different doxyrules.make files included by the same
+# Makefile don't overwrite each other's variables.
+
+PERLMOD_MAKEVAR_PREFIX =
+
+#---------------------------------------------------------------------------
+# Configuration options related to the preprocessor
+#---------------------------------------------------------------------------
+
+# If the ENABLE_PREPROCESSING tag is set to YES (the default) Doxygen will
+# evaluate all C-preprocessor directives found in the sources and include
+# files.
+
+ENABLE_PREPROCESSING = YES
+
+# If the MACRO_EXPANSION tag is set to YES Doxygen will expand all macro
+# names in the source code. If set to NO (the default) only conditional
+# compilation will be performed. Macro expansion can be done in a controlled
+# way by setting EXPAND_ONLY_PREDEF to YES.
+
+MACRO_EXPANSION = YES
+
+# If the EXPAND_ONLY_PREDEF and MACRO_EXPANSION tags are both set to YES
+# then the macro expansion is limited to the macros specified with the
+# PREDEFINED and EXPAND_AS_DEFINED tags.
+
+EXPAND_ONLY_PREDEF = YES
+
+# If the SEARCH_INCLUDES tag is set to YES (the default) the includes files
+# in the INCLUDE_PATH (see below) will be searched if a #include is found.
+
+SEARCH_INCLUDES = YES
+
+# The INCLUDE_PATH tag can be used to specify one or more directories that
+# contain include files that are not input files but should be processed by
+# the preprocessor.
+
+INCLUDE_PATH =
+
+# You can use the INCLUDE_FILE_PATTERNS tag to specify one or more wildcard
+# patterns (like *.h and *.hpp) to filter out the header-files in the
+# directories. If left blank, the patterns specified with FILE_PATTERNS will
+# be used.
+
+INCLUDE_FILE_PATTERNS =
+
+# The PREDEFINED tag can be used to specify one or more macro names that
+# are defined before the preprocessor is started (similar to the -D option of
+# gcc). The argument of the tag is a list of macros of the form: name
+# or name=definition (no spaces). If the definition and the = are
+# omitted =1 is assumed. To prevent a macro definition from being
+# undefined via #undef or recursively expanded use the := operator
+# instead of the = operator.
+
+PREDEFINED = DOXYGEN \
+ PRINTF_ATTRIBUTE(x,y)=
+
+# If the MACRO_EXPANSION and EXPAND_ONLY_PREDEF tags are set to YES then
+# this tag can be used to specify a list of macro names that should be expanded.
+# The macro definition that is found in the sources will be used.
+# Use the PREDEFINED tag if you want to use a different macro definition
+# that overrules the definition found in the source code.
+
+EXPAND_AS_DEFINED =
+
+# If the SKIP_FUNCTION_MACROS tag is set to YES (the default) then
+# doxygen's preprocessor will remove all references to function-like macros
+# that are alone on a line, have an all uppercase name, and do not end with a
+# semicolon, because these will confuse the parser if not removed.
+
+SKIP_FUNCTION_MACROS = YES
+
+#---------------------------------------------------------------------------
+# Configuration::additions related to external references
+#---------------------------------------------------------------------------
+
+# The TAGFILES option can be used to specify one or more tagfiles.
+# Optionally an initial location of the external documentation
+# can be added for each tagfile. The format of a tag file without
+# this location is as follows:
+#
+# TAGFILES = file1 file2 ...
+# Adding location for the tag files is done as follows:
+#
+# TAGFILES = file1=loc1 "file2 = loc2" ...
+# where "loc1" and "loc2" can be relative or absolute paths or
+# URLs. If a location is present for each tag, the installdox tool
+# does not have to be run to correct the links.
+# Note that each tag file must have a unique name
+# (where the name does NOT include the path).
+# If a tag file is not located in the directory in which doxygen
+# is run, you must also specify the path to the tagfile here.
+
+TAGFILES =
+
+# When a file name is specified after GENERATE_TAGFILE, doxygen will create
+# a tag file that is based on the input files it reads.
+
+GENERATE_TAGFILE =
+
+# If the ALLEXTERNALS tag is set to YES all external classes will be listed
+# in the class index. If set to NO only the inherited external classes
+# will be listed.
+
+ALLEXTERNALS = NO
+
+# If the EXTERNAL_GROUPS tag is set to YES all external groups will be listed
+# in the modules index. If set to NO, only the current project's groups will
+# be listed.
+
+EXTERNAL_GROUPS = YES
+
+# The PERL_PATH should be the absolute path and name of the perl script
+# interpreter (i.e. the result of `which perl').
+
+PERL_PATH = /usr/bin/perl
+
+#---------------------------------------------------------------------------
+# Configuration options related to the dot tool
+#---------------------------------------------------------------------------
+
+# If the CLASS_DIAGRAMS tag is set to YES (the default) Doxygen will
+# generate an inheritance diagram (in HTML, RTF and LaTeX) for classes with base
+# or super classes. Setting the tag to NO turns the diagrams off. Note that
+# this option also works with HAVE_DOT disabled, but it is recommended to
+# install and use dot, since it yields more powerful graphs.
+
+CLASS_DIAGRAMS = YES
+
+# You can define message sequence charts within doxygen comments using the \msc
+# command. Doxygen will then run the mscgen tool (see
+# http://www.mcternan.me.uk/mscgen/) to produce the chart and insert it in the
+# documentation. The MSCGEN_PATH tag allows you to specify the directory where
+# the mscgen tool resides. If left empty the tool is assumed to be found in the
+# default search path.
+
+MSCGEN_PATH =
+
+# If set to YES, the inheritance and collaboration graphs will hide
+# inheritance and usage relations if the target is undocumented
+# or is not a class.
+
+HIDE_UNDOC_RELATIONS = YES
+
+# If you set the HAVE_DOT tag to YES then doxygen will assume the dot tool is
+# available from the path. This tool is part of Graphviz, a graph visualization
+# toolkit from AT&T and Lucent Bell Labs. The other options in this section
+# have no effect if this option is set to NO (the default).
+
+HAVE_DOT = NO
+
+# The DOT_NUM_THREADS specifies the number of dot invocations doxygen is
+# allowed to run in parallel. When set to 0 (the default) doxygen will
+# base this on the number of processors available in the system. You can set it
+# explicitly to a value larger than 0 to get control over the balance
+# between CPU load and processing speed.
+
+DOT_NUM_THREADS = 0
+
+# By default doxygen will write a font called Helvetica to the output
+# directory and reference it in all dot files that doxygen generates.
+# When you want a differently looking font you can specify the font name
+# using DOT_FONTNAME. You need to make sure dot is able to find the font,
+# which can be done by putting it in a standard location or by setting the
+# DOTFONTPATH environment variable or by setting DOT_FONTPATH to the directory
+# containing the font.
+
+DOT_FONTNAME = FreeSans
+
+# The DOT_FONTSIZE tag can be used to set the size of the font of dot graphs.
+# The default size is 10pt.
+
+DOT_FONTSIZE = 10
+
+# By default doxygen will tell dot to use the output directory to look for the
+# FreeSans.ttf font (which doxygen will put there itself). If you specify a
+# different font using DOT_FONTNAME you can set the path where dot
+# can find it using this tag.
+
+DOT_FONTPATH =
+
+# If the CLASS_GRAPH and HAVE_DOT tags are set to YES then doxygen
+# will generate a graph for each documented class showing the direct and
+# indirect inheritance relations. Setting this tag to YES will force the
+# CLASS_DIAGRAMS tag to NO.
+
+CLASS_GRAPH = YES
+
+# If the COLLABORATION_GRAPH and HAVE_DOT tags are set to YES then doxygen
+# will generate a graph for each documented class showing the direct and
+# indirect implementation dependencies (inheritance, containment, and
+# class reference variables) of the class with other documented classes.
+
+COLLABORATION_GRAPH = YES
+
+# If the GROUP_GRAPHS and HAVE_DOT tags are set to YES then doxygen
+# will generate a graph for groups, showing the direct group dependencies.
+
+GROUP_GRAPHS = YES
+
+# If the UML_LOOK tag is set to YES doxygen will generate inheritance and
+# collaboration diagrams in a style similar to the OMG's Unified Modeling
+# Language.
+
+UML_LOOK = NO
+
+# If set to YES, the inheritance and collaboration graphs will show the
+# relations between templates and their instances.
+
+TEMPLATE_RELATIONS = NO
+
+# If the ENABLE_PREPROCESSING, SEARCH_INCLUDES, INCLUDE_GRAPH, and HAVE_DOT
+# tags are set to YES then doxygen will generate a graph for each documented
+# file showing the direct and indirect include dependencies of the file with
+# other documented files.
+
+INCLUDE_GRAPH = YES
+
+# If the ENABLE_PREPROCESSING, SEARCH_INCLUDES, INCLUDED_BY_GRAPH, and
+# HAVE_DOT tags are set to YES then doxygen will generate a graph for each
+# documented header file showing the documented files that directly or
+# indirectly include this file.
+
+INCLUDED_BY_GRAPH = YES
+
+# If the CALL_GRAPH and HAVE_DOT options are set to YES then
+# doxygen will generate a call dependency graph for every global function
+# or class method. Note that enabling this option will significantly increase
+# the time of a run. So in most cases it will be better to enable call graphs
+# for selected functions only using the \callgraph command.
+
+CALL_GRAPH = NO
+
+# If the CALLER_GRAPH and HAVE_DOT tags are set to YES then
+# doxygen will generate a caller dependency graph for every global function
+# or class method. Note that enabling this option will significantly increase
+# the time of a run. So in most cases it will be better to enable caller
+# graphs for selected functions only using the \callergraph command.
+
+CALLER_GRAPH = NO
+
+# If the GRAPHICAL_HIERARCHY and HAVE_DOT tags are set to YES then doxygen
+# will generate a graphical hierarchy of all classes instead of a textual one.
+
+GRAPHICAL_HIERARCHY = YES
+
+# If the DIRECTORY_GRAPH, SHOW_DIRECTORIES and HAVE_DOT tags are set to YES
+# then doxygen will show the dependencies a directory has on other directories
+# in a graphical way. The dependency relations are determined by the #include
+# relations between the files in the directories.
+
+DIRECTORY_GRAPH = YES
+
+# The DOT_IMAGE_FORMAT tag can be used to set the image format of the images
+# generated by dot. Possible values are png, svg, gif or jpg.
+# If left blank png will be used.
+
+DOT_IMAGE_FORMAT = png
+
+# The tag DOT_PATH can be used to specify the path where the dot tool can be
+# found. If left blank, it is assumed the dot tool can be found in the path.
+
+DOT_PATH =
+
+# The DOTFILE_DIRS tag can be used to specify one or more directories that
+# contain dot files that are included in the documentation (see the
+# \dotfile command).
+
+DOTFILE_DIRS =
+
+# The MSCFILE_DIRS tag can be used to specify one or more directories that
+# contain msc files that are included in the documentation (see the
+# \mscfile command).
+
+MSCFILE_DIRS =
+
+# The DOT_GRAPH_MAX_NODES tag can be used to set the maximum number of
+# nodes that will be shown in the graph. If the number of nodes in a graph
+# becomes larger than this value, doxygen will truncate the graph, which is
+# visualized by representing a node as a red box. Note that if the
+# number of direct children of the root node in a graph is already larger than
+# DOT_GRAPH_MAX_NODES then the graph will not be shown at all. Also note
+# that the size of a graph can be further restricted by MAX_DOT_GRAPH_DEPTH.
+
+DOT_GRAPH_MAX_NODES = 50
+
+# The MAX_DOT_GRAPH_DEPTH tag can be used to set the maximum depth of the
+# graphs generated by dot. A depth value of 3 means that only nodes reachable
+# from the root by following a path via at most 3 edges will be shown. Nodes
+# that lie further from the root node will be omitted. Note that setting this
+# option to 1 or 2 may greatly reduce the computation time needed for large
+# code bases. Also note that the size of a graph can be further restricted by
+# DOT_GRAPH_MAX_NODES. Using a depth of 0 means no depth restriction.
+
+MAX_DOT_GRAPH_DEPTH = 0
+
+# Set the DOT_TRANSPARENT tag to YES to generate images with a transparent
+# background. This is disabled by default, because dot on Windows does not
+# seem to support this out of the box. Warning: Depending on the platform used,
+# enabling this option may lead to badly anti-aliased labels on the edges of
+# a graph (i.e. they become hard to read).
+
+DOT_TRANSPARENT = YES
+
+# Set the DOT_MULTI_TARGETS tag to YES to allow dot to generate multiple output
+# files in one run (i.e. multiple -o and -T options on the command line). This
+# makes dot run faster, but since only newer versions of dot (>1.8.10)
+# support this, this feature is disabled by default.
+
+DOT_MULTI_TARGETS = NO
+
+# If the GENERATE_LEGEND tag is set to YES (the default) Doxygen will
+# generate a legend page explaining the meaning of the various boxes and
+# arrows in the dot generated graphs.
+
+GENERATE_LEGEND = YES
+
+# If the DOT_CLEANUP tag is set to YES (the default) Doxygen will
+# remove the intermediate dot files that are used to generate
+# the various graphs.
+
+DOT_CLEANUP = YES
diff --git a/include/tdb.h b/include/tdb.h
new file mode 100644
index 0000000..8478ca2
--- /dev/null
+++ b/include/tdb.h
@@ -0,0 +1,911 @@
+#ifndef __TDB_H__
+#define __TDB_H__
+
+/*
+ Unix SMB/CIFS implementation.
+
+ trivial database library
+
+ Copyright (C) Andrew Tridgell 1999-2004
+
+ ** NOTE! The following LGPL license applies to the tdb
+ ** library. This does NOT imply that all of Samba is released
+ ** under the LGPL
+
+ This library is free software; you can redistribute it and/or
+ modify it under the terms of the GNU Lesser General Public
+ License as published by the Free Software Foundation; either
+ version 3 of the License, or (at your option) any later version.
+
+ This library is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ Lesser General Public License for more details.
+
+ You should have received a copy of the GNU Lesser General Public
+ License along with this library; if not, see <http://www.gnu.org/licenses/>.
+*/
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#include <signal.h>
+#include <stdbool.h>
+
+/**
+ * @defgroup tdb The tdb API
+ *
+ * tdb is a Trivial database. In concept, it is very much like GDBM and BSD's
+ * DB, except that it allows multiple simultaneous writers and uses locking
+ * internally to keep writers from trampling on each other. tdb is also
+ * extremely small.
+ *
+ * @section tdb_interface Interface
+ *
+ * The interface is very similar to gdbm except for the following:
+ *
+ * <ul>
+ * <li>different open interface. The tdb_open call is more similar to a
+ * traditional open()</li>
+ * <li>no tdb_reorganise() function</li>
+ * <li>no tdb_sync() function. No operations are cached in the library
+ * anyway</li>
+ * <li>added a tdb_traverse() function for traversing the whole database</li>
+ * <li>added transaction support</li>
+ * </ul>
+ *
+ * A general rule for using tdb is that the caller frees any returned TDB_DATA
+ * structures. Just call free(p.dptr) to free a TDB_DATA return value called p.
+ * This is the same as gdbm.
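+ *
+ * A minimal sketch (assuming tdb is an open context and key a valid key):
+ * @code
+ * TDB_DATA p = tdb_fetch(tdb, key);
+ * if (p.dptr != NULL) {
+ *         // ... use p.dptr and p.dsize ...
+ *         free(p.dptr);
+ * }
+ * @endcode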
+ *
+ * @{
+ */
+
+/** Flags to tdb_store() */
+#define TDB_REPLACE 1 /**< Unused */
+#define TDB_INSERT 2 /**< Don't overwrite an existing entry */
+#define TDB_MODIFY 3 /**< Don't create a new entry */
+
+/** Flags for tdb_open() */
+#define TDB_DEFAULT 0 /**< just a readability placeholder */
+#define TDB_CLEAR_IF_FIRST 1 /**< If this is the first open, wipe the db */
+#define TDB_INTERNAL 2 /**< Don't store on disk */
+#define TDB_NOLOCK 4 /**< Don't do any locking */
+#define TDB_NOMMAP 8 /**< Don't use mmap */
+#define TDB_CONVERT 16 /**< Convert endian (internal use) */
+#define TDB_BIGENDIAN 32 /**< Header is big-endian (internal use) */
+#define TDB_NOSYNC 64 /**< Don't use synchronous transactions */
+#define TDB_SEQNUM 128 /**< Maintain a sequence number */
+#define TDB_VOLATILE 256 /**< Activate the per-hashchain freelist, default 5 */
+#define TDB_ALLOW_NESTING 512 /**< Allow transactions to nest */
+#define TDB_DISALLOW_NESTING 1024 /**< Disallow transactions to nest */
+#define TDB_INCOMPATIBLE_HASH 2048 /**< Better hashing: can't be opened by tdb < 1.2.6. */
+#define TDB_MUTEX_LOCKING 4096 /**< Optimized locking using robust mutexes if supported,
+ only with tdb >= 1.3.0 and TDB_CLEAR_IF_FIRST
+ after checking tdb_runtime_check_for_robust_mutexes() */
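+
+/* A sketch of requesting mutex locking safely (the flag combination below is
+ * illustrative; tdb_runtime_check_for_robust_mutexes() is declared further
+ * down in this header):
+ *
+ *      int tdb_flags = TDB_CLEAR_IF_FIRST;
+ *      if (tdb_runtime_check_for_robust_mutexes())
+ *              tdb_flags |= TDB_MUTEX_LOCKING;
+ */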
+
+/** The tdb error codes */
+enum TDB_ERROR {TDB_SUCCESS=0, TDB_ERR_CORRUPT, TDB_ERR_IO, TDB_ERR_LOCK,
+ TDB_ERR_OOM, TDB_ERR_EXISTS, TDB_ERR_NOLOCK, TDB_ERR_LOCK_TIMEOUT,
+ TDB_ERR_NOEXIST, TDB_ERR_EINVAL, TDB_ERR_RDONLY,
+ TDB_ERR_NESTING};
+
+/** Debugging uses one of the following levels */
+enum tdb_debug_level {TDB_DEBUG_FATAL = 0, TDB_DEBUG_ERROR,
+ TDB_DEBUG_WARNING, TDB_DEBUG_TRACE};
+
+/** The tdb data structure */
+typedef struct TDB_DATA {
+ unsigned char *dptr;
+ size_t dsize;
+} TDB_DATA;
+
+#ifndef PRINTF_ATTRIBUTE
+#if (__GNUC__ >= 3)
+/** Use gcc attribute to check printf fns. a1 is the 1-based index of
+ * the parameter containing the format, and a2 the index of the first
+ * argument. Note that some gcc 2.x versions don't handle this
+ * properly **/
+#define PRINTF_ATTRIBUTE(a1, a2) __attribute__ ((format (__printf__, a1, a2)))
+#else
+#define PRINTF_ATTRIBUTE(a1, a2)
+#endif
+#endif
+
+/** This is the context structure that is returned from a db open. */
+typedef struct tdb_context TDB_CONTEXT;
+
+typedef int (*tdb_traverse_func)(struct tdb_context *, TDB_DATA, TDB_DATA, void *);
+typedef void (*tdb_log_func)(struct tdb_context *, enum tdb_debug_level, const char *, ...) PRINTF_ATTRIBUTE(3, 4);
+typedef unsigned int (*tdb_hash_func)(TDB_DATA *key);
+
+struct tdb_logging_context {
+ tdb_log_func log_fn;
+ void *log_private;
+};
+
+/**
+ * @brief Open the database, creating it if necessary.
+ *
+ * @param[in] name The name of the db to open.
+ *
+ * @param[in] hash_size The hash size is advisory, use zero for a default
+ * value.
+ *
+ * @param[in] tdb_flags The flags to use to open the db:\n\n
+ * TDB_CLEAR_IF_FIRST - Clear database if we are the
+ * only one with it open\n
+ * TDB_INTERNAL - Don't use a file, instead store the
+ * data in memory. The filename is
+ * ignored in this case.\n
+ * TDB_NOLOCK - Don't do any locking\n
+ * TDB_NOMMAP - Don't use mmap\n
+ * TDB_NOSYNC - Don't synchronise transactions to disk\n
+ * TDB_SEQNUM - Maintain a sequence number\n
+ * TDB_VOLATILE - activate the per-hashchain freelist,
+ * default 5.\n
+ * TDB_ALLOW_NESTING - Allow transactions to nest.\n
+ * TDB_DISALLOW_NESTING - Disallow transactions to nest.\n
+ * TDB_INCOMPATIBLE_HASH - Better hashing: can't be opened by tdb < 1.2.6.\n
+ * TDB_MUTEX_LOCKING - Optimized locking using robust mutexes if supported,
+ * can't be opened by tdb < 1.3.0.
+ * Only valid in combination with TDB_CLEAR_IF_FIRST
+ * after checking tdb_runtime_check_for_robust_mutexes()\n
+ *
+ * @param[in] open_flags Flags for the open(2) function.
+ *
+ * @param[in] mode The mode for the open(2) function.
+ *
+ * @return A tdb context structure, NULL on error.
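+ *
+ * A minimal usage sketch (the filename, flags and mode are illustrative):
+ * @code
+ * struct tdb_context *tdb = tdb_open("example.tdb", 0, TDB_DEFAULT,
+ *                                    O_RDWR | O_CREAT, 0600);
+ * if (tdb == NULL) {
+ *         // handle the error
+ * }
+ * @endcode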
+ */
+struct tdb_context *tdb_open(const char *name, int hash_size, int tdb_flags,
+ int open_flags, mode_t mode);
+
+/**
+ * @brief Open the database, creating it if necessary.
+ *
+ * This is like tdb_open(), but allows you to pass an initial logging and
+ * hash function. Be careful when passing a hash function - all users of the
+ * database must use the same hash function or you will get data corruption.
+ *
+ * @param[in] name The name of the db to open.
+ *
+ * @param[in] hash_size The hash size is advisory, use zero for a default
+ * value.
+ *
+ * @param[in] tdb_flags The flags to use to open the db:\n\n
+ * TDB_CLEAR_IF_FIRST - Clear database if we are the
+ * only one with it open\n
+ * TDB_INTERNAL - Don't use a file, instead store the
+ * data in memory. The filename is
+ * ignored in this case.\n
+ * TDB_NOLOCK - Don't do any locking\n
+ * TDB_NOMMAP - Don't use mmap\n
+ * TDB_NOSYNC - Don't synchronise transactions to disk\n
+ * TDB_SEQNUM - Maintain a sequence number\n
+ * TDB_VOLATILE - activate the per-hashchain freelist,
+ * default 5.\n
+ * TDB_ALLOW_NESTING - Allow transactions to nest.\n
+ * TDB_DISALLOW_NESTING - Disallow transactions to nest.\n
+ * TDB_INCOMPATIBLE_HASH - Better hashing: can't be opened by tdb < 1.2.6.\n
+ * TDB_MUTEX_LOCKING - Optimized locking using robust mutexes if supported,
+ * can't be opened by tdb < 1.3.0.
+ * Only valid in combination with TDB_CLEAR_IF_FIRST
+ * after checking tdb_runtime_check_for_robust_mutexes()\n
+ *
+ * @param[in] open_flags Flags for the open(2) function.
+ *
+ * @param[in] mode The mode for the open(2) function.
+ *
+ * @param[in] log_ctx The logging function to use.
+ *
+ * @param[in] hash_fn The hash function you want to use.
+ *
+ * @return A tdb context structure, NULL on error.
+ *
+ * @see tdb_open()
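+ *
+ * A sketch with a custom logger (my_log is a hypothetical function with the
+ * tdb_log_func signature; passing NULL for hash_fn selects the default hash):
+ * @code
+ * struct tdb_logging_context log_ctx = { my_log, NULL };
+ * struct tdb_context *tdb = tdb_open_ex("example.tdb", 0, TDB_DEFAULT,
+ *                                       O_RDWR | O_CREAT, 0600,
+ *                                       &log_ctx, NULL);
+ * @endcode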
+ */
+struct tdb_context *tdb_open_ex(const char *name, int hash_size, int tdb_flags,
+ int open_flags, mode_t mode,
+ const struct tdb_logging_context *log_ctx,
+ tdb_hash_func hash_fn);
+
+/**
+ * @brief Set the maximum number of dead records per hash chain.
+ *
+ * @param[in] tdb The database handle on which to set the maximum.
+ *
+ * @param[in] max_dead The maximum number of dead records per hash chain.
+ */
+void tdb_set_max_dead(struct tdb_context *tdb, int max_dead);
+
+/**
+ * @brief Reopen a tdb.
+ *
+ * This can be used after a fork to ensure that we have an independent seek
+ * pointer from our parent and to re-establish locks.
+ *
+ * @param[in] tdb The database to reopen. It will be freed on error!
+ *
+ * @return 0 on success, -1 on error.
+ *
+ * @note Don't call tdb_error() after this function because the tdb context
+ * will be freed on error.
+ */
+int tdb_reopen(struct tdb_context *tdb);
+
+/**
+ * @brief Reopen all tdb databases.
+ *
+ * If the parent is long-lived (i.e. a parent daemon architecture), we know it
+ * will keep its active lock on a tdb opened with CLEAR_IF_FIRST. Thus for
+ * child processes we don't have to add an active lock. This is essential to
+ * improve performance on systems that keep POSIX locks as a non-scalable data
+ * structure in the kernel.
+ *
+ * @param[in] parent_longlived Whether the parent is long-lived or not.
+ *
+ * @return 0 on success, -1 on error.
+ */
+int tdb_reopen_all(int parent_longlived);
+
+/**
+ * @brief Set a different tdb logging function.
+ *
+ * @param[in] tdb The tdb on which to set the logging function.
+ *
+ * @param[in] log_ctx The logging function to set.
+ */
+void tdb_set_logging_function(struct tdb_context *tdb, const struct tdb_logging_context *log_ctx);
+
+/**
+ * @brief Get the tdb last error code.
+ *
+ * @param[in] tdb The tdb to get the error code from.
+ *
+ * @return A TDB_ERROR code.
+ *
+ * @see TDB_ERROR
+ */
+enum TDB_ERROR tdb_error(struct tdb_context *tdb);
+
+/**
+ * @brief Get an error string for the last tdb error.
+ *
+ * @param[in] tdb The tdb to get the error code from.
+ *
+ * @return An error string.
+ */
+const char *tdb_errorstr(struct tdb_context *tdb);
+
+/**
+ * @brief Fetch an entry in the database given a key.
+ *
+ * The caller must free the resulting data.
+ *
+ * @param[in] tdb The tdb to fetch the key from.
+ *
+ * @param[in] key The key to fetch.
+ *
+ * @return The data found under the key; on error an entry with a
+ * NULL dptr member is returned and the tdb error code is set.
+ *
+ * @see tdb_error()
+ * @see tdb_errorstr()
+ */
+TDB_DATA tdb_fetch(struct tdb_context *tdb, TDB_DATA key);
+
+/**
+ * @brief Hand a record to a parser function without allocating it.
+ *
+ * This function is meant as a fast tdb_fetch alternative for large records
+ * that are frequently read. The "key" and "data" arguments point directly
+ * into the tdb shared memory; they are not aligned at any boundary.
+ *
+ * @warning The parser is called while tdb holds a lock on the record. DO NOT
+ * call other tdb routines from within the parser. Also, for good performance
+ * you should make the parser fast to allow parallel operations.
+ *
+ * @param[in] tdb The tdb to parse the record.
+ *
+ * @param[in] key The key to parse.
+ *
+ * @param[in] parser The parser to use to parse the data.
+ *
+ * @param[in] private_data A private data pointer which is passed to the parser
+ * function.
+ *
+ * @return -1 if the record was not found. If the record was found,
+ * the return value of "parser" is passed up to the caller.
+ */
+int tdb_parse_record(struct tdb_context *tdb, TDB_DATA key,
+ int (*parser)(TDB_DATA key, TDB_DATA data,
+ void *private_data),
+ void *private_data);
+
+/**
+ * @brief Delete an entry in the database given a key.
+ *
+ * @param[in] tdb The tdb to delete the key from.
+ *
+ * @param[in] key The key to delete.
+ *
+ * @return 0 on success, -1 if the key doesn't exist.
+ */
+int tdb_delete(struct tdb_context *tdb, TDB_DATA key);
+
+/**
+ * @brief Store an element in the database.
+ *
+ * This replaces any existing element with the same key.
+ *
+ * @param[in] tdb The tdb to store the entry in.
+ *
+ * @param[in] key The key under which to store the entry.
+ *
+ * @param[in] dbuf The data to store under the key.
+ *
+ * @param[in] flag The flags to store the key:\n\n
+ * TDB_INSERT: Don't overwrite an existing entry.\n
+ * TDB_MODIFY: Don't create a new entry\n
+ *
+ * @return 0 on success, -1 on error with error code set.
+ *
+ * @see tdb_error()
+ * @see tdb_errorstr()
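+ *
+ * Sketch (the key and value shown are illustrative):
+ * @code
+ * TDB_DATA key = { (unsigned char *)"key", 3 };
+ * TDB_DATA val = { (unsigned char *)"value", 5 };
+ * if (tdb_store(tdb, key, val, TDB_REPLACE) != 0) {
+ *         // inspect tdb_error(tdb) / tdb_errorstr(tdb)
+ * }
+ * @endcode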
+ */
+int tdb_store(struct tdb_context *tdb, TDB_DATA key, TDB_DATA dbuf, int flag);
+
+/**
+ * @brief Append data to an entry.
+ *
+ * If the entry doesn't exist, a new one will be created.
+ *
+ * @param[in] tdb The database to use.
+ *
+ * @param[in] key The key to append the data to.
+ *
+ * @param[in] new_dbuf The data to append to the key.
+ *
+ * @return 0 on success, -1 on error with error code set.
+ *
+ * @see tdb_error()
+ * @see tdb_errorstr()
+ */
+int tdb_append(struct tdb_context *tdb, TDB_DATA key, TDB_DATA new_dbuf);
+
+/**
+ * @brief Close a database.
+ *
+ * @param[in] tdb The database to close. The context will be freed.
+ *
+ * @return 0 for success, -1 on error.
+ *
+ * @note Don't call tdb_error() after this function because the tdb context
+ * will be freed on error.
+ */
+int tdb_close(struct tdb_context *tdb);
+
+/**
+ * @brief Find the first entry in the database and return its key.
+ *
+ * The caller must free the returned data.
+ *
+ * @param[in] tdb The database to use.
+ *
+ * @return The first entry of the database, or an empty TDB_DATA entry
+ * if the database is empty.
+ */
+TDB_DATA tdb_firstkey(struct tdb_context *tdb);
+
+/**
+ * @brief Find the next entry in the database, returning its key.
+ *
+ * The caller must free the returned data.
+ *
+ * @param[in] tdb The database to use.
+ *
+ * @param[in] key The key from which you want the next key.
+ *
+ * @return The next entry after the given key, or an empty TDB_DATA
+ * entry if there is no further entry.
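+ *
+ * A sketch of iterating over all keys (each returned key must be freed):
+ * @code
+ * TDB_DATA k, next;
+ * for (k = tdb_firstkey(tdb); k.dptr != NULL; k = next) {
+ *         next = tdb_nextkey(tdb, k);
+ *         // ... use k ...
+ *         free(k.dptr);
+ * }
+ * @endcode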
+ */
+TDB_DATA tdb_nextkey(struct tdb_context *tdb, TDB_DATA key);
+
+/**
+ * @brief Traverse the entire database.
+ *
+ * While traversing the function fn(tdb, key, data, state) is called on each
+ * element. If fn is NULL then it is not called. A non-zero return value from
+ * fn() indicates that the traversal should stop. Traversal callbacks may not
+ * start transactions.
+ *
+ * @warning The data buffer given to the callback fn does NOT meet the alignment
+ * restrictions malloc gives you.
+ *
+ * @param[in] tdb The database to traverse.
+ *
+ * @param[in] fn The function to call on each entry.
+ *
+ * @param[in] private_data The private data which should be passed to the
+ * traversing function.
+ *
+ * @return The record count traversed, -1 on error.
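+ *
+ * A sketch of a counting callback (count_records is illustrative; returning
+ * non-zero from the callback stops the traversal):
+ * @code
+ * static int count_records(struct tdb_context *tdb, TDB_DATA key,
+ *                          TDB_DATA data, void *state)
+ * {
+ *         (*(int *)state)++;
+ *         return 0;
+ * }
+ *
+ * int count = 0;
+ * tdb_traverse(tdb, count_records, &count);
+ * @endcode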
+ */
+int tdb_traverse(struct tdb_context *tdb, tdb_traverse_func fn, void *private_data);
+
+/**
+ * @brief Traverse the entire database.
+ *
+ * While traversing the database the function fn(tdb, key, data, state) is
+ * called on each element, but the database is marked read-only during the
+ * traversal, so any write operations will fail. This allows tdb to use read
+ * locks, which increases the parallelism possible during the traversal.
+ *
+ * @param[in] tdb The database to traverse.
+ *
+ * @param[in] fn The function to call on each entry.
+ *
+ * @param[in] private_data The private data which should be passed to the
+ * traversing function.
+ *
+ * @return The record count traversed, -1 on error.
+ */
+int tdb_traverse_read(struct tdb_context *tdb, tdb_traverse_func fn, void *private_data);
+
+/**
+ * @brief Check if an entry in the database exists.
+ *
+ * @note 1 is returned if the key is found and 0 is returned if not found. This
+ * doesn't match the conventions in the rest of this module, but is compatible
+ * with gdbm.
+ *
+ * @param[in] tdb The database to check if the entry exists.
+ *
+ * @param[in] key The key to check if the entry exists.
+ *
+ * @return 1 if the key is found, 0 if not.
+ */
+int tdb_exists(struct tdb_context *tdb, TDB_DATA key);
+
+/**
+ * @brief Lock entire database with a write lock.
+ *
+ * @param[in] tdb The database to lock.
+ *
+ * @return 0 on success, -1 on error with error code set.
+ *
+ * @see tdb_error()
+ * @see tdb_errorstr()
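+ *
+ * Sketch of pairing with tdb_unlockall():
+ * @code
+ * if (tdb_lockall(tdb) == 0) {
+ *         // ... several operations, atomic w.r.t. other processes ...
+ *         tdb_unlockall(tdb);
+ * }
+ * @endcode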
+ */
+int tdb_lockall(struct tdb_context *tdb);
+
+/**
+ * @brief Lock entire database with a write lock.
+ *
+ * This is the non-blocking call.
+ *
+ * @param[in] tdb The database to lock.
+ *
+ * @return 0 on success, -1 on error with error code set.
+ *
+ * @see tdb_lockall()
+ * @see tdb_error()
+ * @see tdb_errorstr()
+ */
+int tdb_lockall_nonblock(struct tdb_context *tdb);
+
+/**
+ * @brief Unlock entire database with write lock.
+ *
+ * @param[in] tdb The database to unlock.
+ *
+ * @return 0 on success, -1 on error with error code set.
+ *
+ * @see tdb_lockall()
+ * @see tdb_error()
+ * @see tdb_errorstr()
+ */
+int tdb_unlockall(struct tdb_context *tdb);
+
+/**
+ * @brief Lock entire database with a read lock.
+ *
+ * @param[in] tdb The database to lock.
+ *
+ * @return 0 on success, -1 on error with error code set.
+ *
+ * @see tdb_error()
+ * @see tdb_errorstr()
+ */
+int tdb_lockall_read(struct tdb_context *tdb);
+
+/**
+ * @brief Lock entire database with a read lock.
+ *
+ * This is the non-blocking call.
+ *
+ * @param[in] tdb The database to lock.
+ *
+ * @return 0 on success, -1 on error with error code set.
+ *
+ * @see tdb_lockall_read()
+ * @see tdb_error()
+ * @see tdb_errorstr()
+ */
+int tdb_lockall_read_nonblock(struct tdb_context *tdb);
+
+/**
+ * @brief Unlock entire database with read lock.
+ *
+ * @param[in] tdb The database to unlock.
+ *
+ * @return 0 on success, -1 on error with error code set.
+ *
+ * @see tdb_lockall_read()
+ * @see tdb_error()
+ * @see tdb_errorstr()
+ */
+int tdb_unlockall_read(struct tdb_context *tdb);
+
+/**
+ * @brief Lock entire database with write lock - mark only.
+ *
+ * @todo Add more details.
+ *
+ * @param[in] tdb The database to mark.
+ *
+ * @return 0 on success, -1 on error with error code set.
+ *
+ * @see tdb_error()
+ * @see tdb_errorstr()
+ */
+int tdb_lockall_mark(struct tdb_context *tdb);
+
+/**
+ * @brief Lock entire database with write lock - unmark only.
+ *
+ * @todo Add more details.
+ *
+ * @param[in] tdb The database to unmark.
+ *
+ * @return 0 on success, -1 on error with error code set.
+ *
+ * @see tdb_error()
+ * @see tdb_errorstr()
+ */
+int tdb_lockall_unmark(struct tdb_context *tdb);
+
+/**
+ * @brief Get the name of the current tdb file.
+ *
+ * This is useful for external logging functions.
+ *
+ * @param[in] tdb The database to get the name from.
+ *
+ * @return The name of the database.
+ */
+const char *tdb_name(struct tdb_context *tdb);
+
+/**
+ * @brief Get the underlying file descriptor being used by tdb.
+ *
+ * This is useful for external routines that want to check the device/inode
+ * of the fd.
+ *
+ * @param[in] tdb The database to get the fd from.
+ *
+ * @return The file descriptor or -1.
+ */
+int tdb_fd(struct tdb_context *tdb);
+
+/**
+ * @brief Get the current logging function.
+ *
+ * This is useful for external tdb routines that wish to log tdb errors.
+ *
+ * @param[in] tdb The database to get the logging function from.
+ *
+ * @return The logging function of the database.
+ *
+ * @see tdb_get_logging_private()
+ */
+tdb_log_func tdb_log_fn(struct tdb_context *tdb);
+
+/**
+ * @brief Get the private data of the logging function.
+ *
+ * @param[in] tdb The database to get the data from.
+ *
+ * @return The private data pointer of the logging function.
+ *
+ * @see tdb_log_fn()
+ */
+void *tdb_get_logging_private(struct tdb_context *tdb);
+
+/**
+ * @brief Start a transaction.
+ *
+ * All operations after the transaction start can either be committed with
+ * tdb_transaction_commit() or cancelled with tdb_transaction_cancel().
+ *
+ * If you call tdb_transaction_start() again on the same tdb context while a
+ * transaction is in progress, then the same transaction buffer is re-used. The
+ * number of tdb_transaction_{commit,cancel} operations must match the number
+ * of successful tdb_transaction_start() calls.
+ *
+ * Note that transactions are by default disk synchronous, and use a recover
+ * area in the database to automatically recover the database on the next open
+ * if the system crashes during a transaction. You can disable the synchronous
+ * transaction recovery setup using the TDB_NOSYNC flag, which will greatly
+ * speed up operations at the risk of corrupting your database if the system
+ * crashes.
+ *
+ * Operations made within a transaction are not visible to other users of the
+ * database until a successful commit.
+ *
+ * @param[in] tdb The database to start the transaction.
+ *
+ * @return 0 on success, -1 on error with error code set.
+ *
+ * @see tdb_error()
+ * @see tdb_errorstr()
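+ *
+ * Example (an illustrative sketch; key and data are assumed to be TDB_DATA
+ * values prepared by the caller):
+ * @code
+ * if (tdb_transaction_start(tdb) != 0) {
+ *     return -1;
+ * }
+ * if (tdb_store(tdb, key, data, TDB_REPLACE) != 0) {
+ *     tdb_transaction_cancel(tdb);
+ *     return -1;
+ * }
+ * return tdb_transaction_commit(tdb);
+ * @endcode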
+ */
+int tdb_transaction_start(struct tdb_context *tdb);
+
+/**
+ * @brief Start a transaction, non-blocking.
+ *
+ * @param[in] tdb The database to start the transaction.
+ *
+ * @return 0 on success, -1 on error with error code set.
+ *
+ * @see tdb_error()
+ * @see tdb_errorstr()
+ * @see tdb_transaction_start()
+ */
+int tdb_transaction_start_nonblock(struct tdb_context *tdb);
+
+/**
+ * @brief Prepare to commit a current transaction, for two-phase commits.
+ *
+ * Once prepared for commit, the only allowed calls are tdb_transaction_commit()
+ * or tdb_transaction_cancel(). Preparing allocates disk space for the pending
+ * updates, so a subsequent commit should succeed (barring any hardware
+ * failures).
+ *
+ * @param[in] tdb The database to prepare the commit.
+ *
+ * @return 0 on success, -1 on error with error code set.
+ *
+ * @see tdb_error()
+ * @see tdb_errorstr()
+ */
+int tdb_transaction_prepare_commit(struct tdb_context *tdb);
+
+/**
+ * @brief Commit a current transaction.
+ *
+ * This updates the database and releases the current transaction locks.
+ *
+ * @param[in] tdb The database to commit the transaction.
+ *
+ * @return 0 on success, -1 on error with error code set.
+ *
+ * @see tdb_error()
+ * @see tdb_errorstr()
+ */
+int tdb_transaction_commit(struct tdb_context *tdb);
+
+/**
+ * @brief Cancel a current transaction.
+ *
+ * This discards all write and lock operations that have been made since the
+ * transaction started.
+ *
+ * @param[in] tdb The tdb to cancel the transaction on.
+ *
+ * @return 0 on success, -1 on error with error code set.
+ *
+ * @see tdb_error()
+ * @see tdb_errorstr()
+ */
+int tdb_transaction_cancel(struct tdb_context *tdb);
+
+/**
+ * @brief Get the tdb sequence number.
+ *
+ * Only makes sense if the writers opened the database with the TDB_SEQNUM
+ * flag set. Note that this sequence number will wrap quite quickly, so it
+ * should only be used for a
+ * 'has something changed' test, not for code that relies on the count of the
+ * number of changes made. If you want a counter then use a tdb record.
+ *
+ * The aim of this sequence number is to allow for a very lightweight test of a
+ * possible tdb change.
+ *
+ * @param[in] tdb The database to get the sequence number from.
+ *
+ * @return The sequence number or 0.
+ *
+ * @see tdb_open()
+ * @see tdb_enable_seqnum()
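+ *
+ * Example (an illustrative sketch of a cheap 'has something changed' test):
+ * @code
+ * int seq = tdb_get_seqnum(tdb);
+ * // ... some time later ...
+ * if (tdb_get_seqnum(tdb) != seq) {
+ *     // something may have changed, re-read the records of interest
+ * }
+ * @endcode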
+ */
+int tdb_get_seqnum(struct tdb_context *tdb);
+
+/**
+ * @brief Get the hash size.
+ *
+ * @param[in] tdb The database to get the hash size from.
+ *
+ * @return The hash size.
+ */
+int tdb_hash_size(struct tdb_context *tdb);
+
+/**
+ * @brief Get the map size.
+ *
+ * @param[in] tdb The database to get the map size from.
+ *
+ * @return The map size.
+ */
+size_t tdb_map_size(struct tdb_context *tdb);
+
+/**
+ * @brief Get the tdb flags set during open.
+ *
+ * @param[in] tdb The database to get the flags from.
+ *
+ * @return The flags set on the database.
+ */
+int tdb_get_flags(struct tdb_context *tdb);
+
+/**
+ * @brief Add flags to the database.
+ *
+ * @param[in] tdb The database to add the flags to.
+ *
+ * @param[in] flag The tdb flags to add.
+ */
+void tdb_add_flags(struct tdb_context *tdb, unsigned flag);
+
+/**
+ * @brief Remove flags from the database.
+ *
+ * @param[in] tdb The database to remove the flags from.
+ *
+ * @param[in] flag The tdb flags to remove.
+ */
+void tdb_remove_flags(struct tdb_context *tdb, unsigned flag);
+
+/**
+ * @brief Enable sequence number handling on an open tdb.
+ *
+ * @param[in] tdb The database to enable sequence number handling on.
+ *
+ * @see tdb_get_seqnum()
+ */
+void tdb_enable_seqnum(struct tdb_context *tdb);
+
+/**
+ * @brief Increment the tdb sequence number.
+ *
+ * This only works if the tdb has been opened using the TDB_SEQNUM flag or
+ * enabled using tdb_enable_seqnum().
+ *
+ * @param[in] tdb The database to increment the sequence number.
+ *
+ * @see tdb_enable_seqnum()
+ * @see tdb_get_seqnum()
+ */
+void tdb_increment_seqnum_nonblock(struct tdb_context *tdb);
+
+/**
+ * @brief Create a hash of the key.
+ *
+ * @param[in] key The key to hash.
+ *
+ * @return The hash.
+ */
+unsigned int tdb_jenkins_hash(TDB_DATA *key);
+
+/**
+ * @brief Check the consistency of the database.
+ *
+ * This checks the consistency of the database, calling back the check
+ * function (if non-NULL) on each record. If some consistency check fails,
+ * or the supplied check function returns -1, tdb_check() returns -1,
+ * otherwise 0.
+ *
+ * @note The logging function (if set) will be called with additional
+ * information on the corruption found.
+ *
+ * @param[in] tdb The database to check.
+ *
+ * @param[in] check The check function to use.
+ *
+ * @param[in] private_data The private data to pass to the check function.
+ *
+ * @return 0 on success, -1 on error with error code set.
+ *
+ * @see tdb_error()
+ * @see tdb_errorstr()
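+ *
+ * Example (an illustrative sketch; the validity rule inside check_fn is
+ * hypothetical and application-specific):
+ * @code
+ * static int check_fn(TDB_DATA key, TDB_DATA data, void *private_data)
+ * {
+ *     // reject records with an empty key, accept everything else
+ *     return (key.dsize > 0) ? 0 : -1;
+ * }
+ *
+ * if (tdb_check(tdb, check_fn, NULL) == -1) {
+ *     // the database is corrupt; tdb_rescue() may salvage records
+ * }
+ * @endcode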
+ */
+int tdb_check(struct tdb_context *tdb,
+ int (*check) (TDB_DATA key, TDB_DATA data, void *private_data),
+ void *private_data);
+
+/**
+ * @brief Dump all possible records in a corrupt database.
+ *
+ * This is the only way to get data out of a database where tdb_check() fails.
+ * It will call walk() with anything which looks like a database record; this
+ * may well include invalid, incomplete or duplicate records.
+ *
+ * @param[in] tdb The database to check.
+ *
+ * @param[in] walk The walk function to use.
+ *
+ * @param[in] private_data The private data to pass to the walk function.
+ *
+ * @return 0 on success, -1 on error with error code set.
+ *
+ * @see tdb_error()
+ * @see tdb_errorstr()
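+ *
+ * Example (an illustrative sketch; salvaging records into a fresh database
+ * is one common approach, with TDB_INSERT keeping the first copy of any
+ * duplicate):
+ * @code
+ * static void walk_fn(TDB_DATA key, TDB_DATA data, void *private_data)
+ * {
+ *     struct tdb_context *out = (struct tdb_context *)private_data;
+ *     tdb_store(out, key, data, TDB_INSERT);
+ * }
+ *
+ * tdb_rescue(broken_tdb, walk_fn, fresh_tdb);
+ * @endcode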
+ */
+int tdb_rescue(struct tdb_context *tdb,
+ void (*walk) (TDB_DATA key, TDB_DATA data, void *private_data),
+ void *private_data);
+
+/**
+ * @brief Check if support for TDB_MUTEX_LOCKING is available at runtime.
+ *
+ * On some systems the API for pthread_mutexattr_setrobust() is not available.
+ * On other systems there are some bugs in the interaction between glibc and
+ * the linux kernel.
+ *
+ * This function provides a runtime check of whether robust mutexes are
+ * really available.
+ *
+ * This needs to be called and return true before TDB_MUTEX_LOCKING
+ * can be used at runtime.
+ *
+ * @note This calls fork(), but the SIGCHLD handling should be transparent.
+ *
+ * @return true if supported, false otherwise.
+ *
+ * @see TDB_MUTEX_LOCKING
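+ *
+ * Example (an illustrative sketch; in tdb, TDB_MUTEX_LOCKING is only valid
+ * in combination with TDB_CLEAR_IF_FIRST):
+ * @code
+ * int tdb_flags = TDB_CLEAR_IF_FIRST;
+ *
+ * if (tdb_runtime_check_for_robust_mutexes()) {
+ *     tdb_flags |= TDB_MUTEX_LOCKING;
+ * }
+ * tdb = tdb_open("mydb.tdb", 0, tdb_flags, O_RDWR | O_CREAT, 0600);
+ * @endcode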
+ */
+bool tdb_runtime_check_for_robust_mutexes(void);
+
+/* @} ******************************************************************/
+
+/* Low level locking functions: use with care */
+int tdb_chainlock(struct tdb_context *tdb, TDB_DATA key);
+int tdb_chainlock_nonblock(struct tdb_context *tdb, TDB_DATA key);
+int tdb_chainunlock(struct tdb_context *tdb, TDB_DATA key);
+int tdb_chainlock_read(struct tdb_context *tdb, TDB_DATA key);
+int tdb_chainlock_read_nonblock(struct tdb_context *tdb, TDB_DATA key);
+int tdb_chainunlock_read(struct tdb_context *tdb, TDB_DATA key);
+int tdb_chainlock_mark(struct tdb_context *tdb, TDB_DATA key);
+int tdb_chainlock_unmark(struct tdb_context *tdb, TDB_DATA key);
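+
+/* Example (an illustrative sketch): an atomic read-modify-write of one
+ * record under its chain lock.
+ *
+ *	if (tdb_chainlock(tdb, key) == 0) {
+ *		TDB_DATA val = tdb_fetch(tdb, key);
+ *		... modify val ...
+ *		tdb_store(tdb, key, val, TDB_REPLACE);
+ *		free(val.dptr);
+ *		tdb_chainunlock(tdb, key);
+ *	}
+ */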
+
+void tdb_setalarm_sigptr(struct tdb_context *tdb, volatile sig_atomic_t *sigptr);
+
+/* wipe and repack */
+int tdb_wipe_all(struct tdb_context *tdb);
+int tdb_repack(struct tdb_context *tdb);
+
+/* Debug functions. Not used in production. */
+void tdb_dump_all(struct tdb_context *tdb);
+int tdb_printfreelist(struct tdb_context *tdb);
+int tdb_validate_freelist(struct tdb_context *tdb, int *pnum_entries);
+int tdb_freelist_size(struct tdb_context *tdb);
+char *tdb_summary(struct tdb_context *tdb);
+
+extern TDB_DATA tdb_null;
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* tdb.h */
diff --git a/lib/replace/.checker_innocent b/lib/replace/.checker_innocent
new file mode 100644
index 0000000..e619176
--- /dev/null
+++ b/lib/replace/.checker_innocent
@@ -0,0 +1,4 @@
+>>>MISTAKE21_create_files_6a9e68ada99a97cb
+>>>MISTAKE21_os2_delete_9b2bfa7f38711d09
+>>>MISTAKE21_os2_delete_2fcc29aaa99a97cb
+>>>SECURITY2_os2_delete_9b2bfa7f1c9396ca
diff --git a/lib/replace/Makefile b/lib/replace/Makefile
new file mode 100644
index 0000000..3649901
--- /dev/null
+++ b/lib/replace/Makefile
@@ -0,0 +1,63 @@
+# simple makefile wrapper to run waf
+
+WAF=WAF_MAKE=1 PATH=buildtools/bin:../../buildtools/bin:$$PATH waf
+
+all:
+ $(WAF) build
+
+install:
+ $(WAF) install
+
+uninstall:
+ $(WAF) uninstall
+
+test:
+ $(WAF) test $(TEST_OPTIONS)
+
+testenv:
+ $(WAF) test --testenv $(TEST_OPTIONS)
+
+quicktest:
+ $(WAF) test --quick $(TEST_OPTIONS)
+
+dist:
+ touch .tmplock
+ WAFLOCK=.tmplock $(WAF) dist
+
+distcheck:
+ touch .tmplock
+ WAFLOCK=.tmplock $(WAF) distcheck
+
+clean:
+ $(WAF) clean
+
+distclean:
+ $(WAF) distclean
+
+reconfigure: configure
+ $(WAF) reconfigure
+
+show_waf_options:
+ $(WAF) --help
+
+# some compatibility make targets
+everything: all
+
+testsuite: all
+
+check: test
+
+torture: all
+
+# this should do an install as well, once install is finished
+installcheck: test
+
+etags:
+ $(WAF) etags
+
+ctags:
+ $(WAF) ctags
+
+bin/%:: FORCE
+ $(WAF) --targets=`basename $@`
+FORCE:
diff --git a/lib/replace/README b/lib/replace/README
new file mode 100644
index 0000000..9dd4f73
--- /dev/null
+++ b/lib/replace/README
@@ -0,0 +1,127 @@
+This subsystem ensures that we can always use a certain core set of
+functions and types that are either provided by the OS or by replacement
+functions / definitions in this subsystem. The aim is to stick
+to POSIX functions in here as much as possible. Convenience functions
+that are available on no platform at all belong in other subsystems
+(such as LIBUTIL).
+
+The following functions are guaranteed:
+
+ftruncate
+strlcpy
+strlcat
+mktime
+rename
+initgroups
+memmove
+strdup
+setlinebuf
+vsyslog
+timegm
+setenv
+unsetenv
+strndup
+strnlen
+waitpid
+seteuid
+setegid
+asprintf
+snprintf
+vasprintf
+vsnprintf
+opendir
+readdir
+telldir
+seekdir
+clock_gettime
+closedir
+dlopen
+dlclose
+dlsym
+dlerror
+chroot
+bzero
+strerror
+errno
+mkdtemp
+mkstemp (a secure one!)
+pread
+pwrite
+chown
+lchown
+readline (the library)
+inet_ntoa
+inet_ntop
+inet_pton
+inet_aton
+strtoll
+strtoull
+socketpair
+strptime
+getaddrinfo
+freeaddrinfo
+getnameinfo
+gai_strerror
+getifaddrs
+freeifaddrs
+utime
+utimes
+dup2
+link
+readlink
+symlink
+realpath
+poll
+setproctitle
+
+Types:
+bool
+socklen_t
+uint{8,16,32,64}_t
+int{8,16,32,64}_t
+intptr_t
+sig_atomic_t
+blksize_t
+blkcnt_t
+
+Constants:
+PATH_NAME_MAX
+UINT{16,32,64}_MAX
+INT32_MAX
+RTLD_LAZY
+HOST_NAME_MAX
+CHAR_BIT
+
+Macros:
+va_copy
+__FUNCTION__
+__FILE__
+__LINE__
+__LINESTR__
+__location__
+__STRING
+__STRINGSTRING
+MIN
+MAX
+QSORT_CAST
+ZERO_STRUCT
+ZERO_STRUCTP
+ZERO_STRUCTPN
+ZERO_ARRAY
+ARRAY_SIZE
+PTR_DIFF
+
+Headers:
+stdint.h
+stdbool.h
+
+Optional C keywords:
+volatile
+
+Prerequisites:
+memset (for bzero)
+syslog (for vsyslog)
+mktemp (for mkstemp and mkdtemp)
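+
+As a sketch of typical use (illustrative only), a consumer includes
+replace.h before using any of the functions above and can then rely on
+them unconditionally, whether or not the platform provides them natively:
+
+  #include "replace.h"
+
+  char buf[16];
+  strlcpy(buf, "some long string", sizeof(buf));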
diff --git a/lib/replace/configure b/lib/replace/configure
new file mode 100755
index 0000000..6a9f875
--- /dev/null
+++ b/lib/replace/configure
@@ -0,0 +1,21 @@
+#!/bin/sh
+
+PREVPATH=`dirname $0`
+
+if [ -f $PREVPATH/../../buildtools/bin/waf ]; then
+ WAF=../../buildtools/bin/waf
+elif [ -f $PREVPATH/buildtools/bin/waf ]; then
+ WAF=./buildtools/bin/waf
+else
+ echo "replace: Unable to find waf"
+ exit 1
+fi
+
+# using JOBS=1 gives maximum compatibility with
+# systems like AIX which have broken threading in python
+JOBS=1
+export JOBS
+
+cd . || exit 1
+$WAF configure "$@" || exit 1
+cd $PREVPATH
diff --git a/lib/replace/crypt.c b/lib/replace/crypt.c
new file mode 100644
index 0000000..3a067bc
--- /dev/null
+++ b/lib/replace/crypt.c
@@ -0,0 +1,770 @@
+/*
+ This bit of code was derived from the UFC-crypt package which
+ carries the following copyright
+
+ Modified for use by Samba by Andrew Tridgell, October 1994
+
+ Note that this routine is only faster on some machines. Under Linux 1.1.51
+ libc 4.5.26 I actually found this routine to be slightly slower.
+
+ Under SunOS I found a huge speedup by using these routines
+ (a factor of 20 or so)
+
+ Warning: I've had a report from Steve Kennedy <steve@gbnet.org>
+ that this crypt routine may sometimes get the wrong answer. Only
+ use UFC_CRYPT if you really need it.
+
+*/
+
+#include "replace.h"
+
+#ifndef HAVE_CRYPT
+
+/*
+ * UFC-crypt: ultra fast crypt(3) implementation
+ *
+ * Copyright (C) 1991-1998, Free Software Foundation, Inc.
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 3 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Library General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, see <http://www.gnu.org/licenses/>.
+ *
+ * @(#)crypt_util.c 2.31 02/08/92
+ *
+ * Support routines
+ *
+ */
+
+
+#ifndef long32
+#define long32 int32_t
+#endif
+
+#ifndef long64
+#define long64 int64_t
+#endif
+
+#ifndef ufc_long
+#define ufc_long unsigned
+#endif
+
+#ifndef _UFC_64_
+#define _UFC_32_
+#endif
+
+/*
+ * Permutation done once on the 56 bit
+ * key derived from the original 8 byte ASCII key.
+ */
+static int pc1[56] = {
+ 57, 49, 41, 33, 25, 17, 9, 1, 58, 50, 42, 34, 26, 18,
+ 10, 2, 59, 51, 43, 35, 27, 19, 11, 3, 60, 52, 44, 36,
+ 63, 55, 47, 39, 31, 23, 15, 7, 62, 54, 46, 38, 30, 22,
+ 14, 6, 61, 53, 45, 37, 29, 21, 13, 5, 28, 20, 12, 4
+};
+
+/*
+ * How much to rotate each 28 bit half of the pc1 permuted
+ * 56 bit key before using pc2 to give the i'th key
+ */
+static int rots[16] = {
+ 1, 1, 2, 2, 2, 2, 2, 2, 1, 2, 2, 2, 2, 2, 2, 1
+};
+
+/*
+ * Permutation giving the key
+ * of the i'th DES round
+ */
+static int pc2[48] = {
+ 14, 17, 11, 24, 1, 5, 3, 28, 15, 6, 21, 10,
+ 23, 19, 12, 4, 26, 8, 16, 7, 27, 20, 13, 2,
+ 41, 52, 31, 37, 47, 55, 30, 40, 51, 45, 33, 48,
+ 44, 49, 39, 56, 34, 53, 46, 42, 50, 36, 29, 32
+};
+
+/*
+ * The E expansion table which selects
+ * bits from the 32 bit intermediate result.
+ */
+static int esel[48] = {
+ 32, 1, 2, 3, 4, 5, 4, 5, 6, 7, 8, 9,
+ 8, 9, 10, 11, 12, 13, 12, 13, 14, 15, 16, 17,
+ 16, 17, 18, 19, 20, 21, 20, 21, 22, 23, 24, 25,
+ 24, 25, 26, 27, 28, 29, 28, 29, 30, 31, 32, 1
+};
+static int e_inverse[64];
+
+/*
+ * Permutation done on the
+ * result of sbox lookups
+ */
+static int perm32[32] = {
+ 16, 7, 20, 21, 29, 12, 28, 17, 1, 15, 23, 26, 5, 18, 31, 10,
+ 2, 8, 24, 14, 32, 27, 3, 9, 19, 13, 30, 6, 22, 11, 4, 25
+};
+
+/*
+ * The sboxes
+ */
+static int sbox[8][4][16]= {
+ { { 14, 4, 13, 1, 2, 15, 11, 8, 3, 10, 6, 12, 5, 9, 0, 7 },
+ { 0, 15, 7, 4, 14, 2, 13, 1, 10, 6, 12, 11, 9, 5, 3, 8 },
+ { 4, 1, 14, 8, 13, 6, 2, 11, 15, 12, 9, 7, 3, 10, 5, 0 },
+ { 15, 12, 8, 2, 4, 9, 1, 7, 5, 11, 3, 14, 10, 0, 6, 13 }
+ },
+
+ { { 15, 1, 8, 14, 6, 11, 3, 4, 9, 7, 2, 13, 12, 0, 5, 10 },
+ { 3, 13, 4, 7, 15, 2, 8, 14, 12, 0, 1, 10, 6, 9, 11, 5 },
+ { 0, 14, 7, 11, 10, 4, 13, 1, 5, 8, 12, 6, 9, 3, 2, 15 },
+ { 13, 8, 10, 1, 3, 15, 4, 2, 11, 6, 7, 12, 0, 5, 14, 9 }
+ },
+
+ { { 10, 0, 9, 14, 6, 3, 15, 5, 1, 13, 12, 7, 11, 4, 2, 8 },
+ { 13, 7, 0, 9, 3, 4, 6, 10, 2, 8, 5, 14, 12, 11, 15, 1 },
+ { 13, 6, 4, 9, 8, 15, 3, 0, 11, 1, 2, 12, 5, 10, 14, 7 },
+ { 1, 10, 13, 0, 6, 9, 8, 7, 4, 15, 14, 3, 11, 5, 2, 12 }
+ },
+
+ { { 7, 13, 14, 3, 0, 6, 9, 10, 1, 2, 8, 5, 11, 12, 4, 15 },
+ { 13, 8, 11, 5, 6, 15, 0, 3, 4, 7, 2, 12, 1, 10, 14, 9 },
+ { 10, 6, 9, 0, 12, 11, 7, 13, 15, 1, 3, 14, 5, 2, 8, 4 },
+ { 3, 15, 0, 6, 10, 1, 13, 8, 9, 4, 5, 11, 12, 7, 2, 14 }
+ },
+
+ { { 2, 12, 4, 1, 7, 10, 11, 6, 8, 5, 3, 15, 13, 0, 14, 9 },
+ { 14, 11, 2, 12, 4, 7, 13, 1, 5, 0, 15, 10, 3, 9, 8, 6 },
+ { 4, 2, 1, 11, 10, 13, 7, 8, 15, 9, 12, 5, 6, 3, 0, 14 },
+ { 11, 8, 12, 7, 1, 14, 2, 13, 6, 15, 0, 9, 10, 4, 5, 3 }
+ },
+
+ { { 12, 1, 10, 15, 9, 2, 6, 8, 0, 13, 3, 4, 14, 7, 5, 11 },
+ { 10, 15, 4, 2, 7, 12, 9, 5, 6, 1, 13, 14, 0, 11, 3, 8 },
+ { 9, 14, 15, 5, 2, 8, 12, 3, 7, 0, 4, 10, 1, 13, 11, 6 },
+ { 4, 3, 2, 12, 9, 5, 15, 10, 11, 14, 1, 7, 6, 0, 8, 13 }
+ },
+
+ { { 4, 11, 2, 14, 15, 0, 8, 13, 3, 12, 9, 7, 5, 10, 6, 1 },
+ { 13, 0, 11, 7, 4, 9, 1, 10, 14, 3, 5, 12, 2, 15, 8, 6 },
+ { 1, 4, 11, 13, 12, 3, 7, 14, 10, 15, 6, 8, 0, 5, 9, 2 },
+ { 6, 11, 13, 8, 1, 4, 10, 7, 9, 5, 0, 15, 14, 2, 3, 12 }
+ },
+
+ { { 13, 2, 8, 4, 6, 15, 11, 1, 10, 9, 3, 14, 5, 0, 12, 7 },
+ { 1, 15, 13, 8, 10, 3, 7, 4, 12, 5, 6, 11, 0, 14, 9, 2 },
+ { 7, 11, 4, 1, 9, 12, 14, 2, 0, 6, 10, 13, 15, 3, 5, 8 },
+ { 2, 1, 14, 7, 4, 10, 8, 13, 15, 12, 9, 0, 3, 5, 6, 11 }
+ }
+};
+
+/*
+ * This is the final
+ * permutation matrix
+ */
+static int final_perm[64] = {
+ 40, 8, 48, 16, 56, 24, 64, 32, 39, 7, 47, 15, 55, 23, 63, 31,
+ 38, 6, 46, 14, 54, 22, 62, 30, 37, 5, 45, 13, 53, 21, 61, 29,
+ 36, 4, 44, 12, 52, 20, 60, 28, 35, 3, 43, 11, 51, 19, 59, 27,
+ 34, 2, 42, 10, 50, 18, 58, 26, 33, 1, 41, 9, 49, 17, 57, 25
+};
+
+/*
+ * The 16 DES keys in BITMASK format
+ */
+#ifdef _UFC_32_
+long32 _ufc_keytab[16][2];
+#endif
+
+#ifdef _UFC_64_
+long64 _ufc_keytab[16];
+#endif
+
+
+#define ascii_to_bin(c) ((c)>='a'?(c-59):(c)>='A'?((c)-53):(c)-'.')
+#define bin_to_ascii(c) ((c)>=38?((c)-38+'a'):(c)>=12?((c)-12+'A'):(c)+'.')
+
+/* Macro to set a bit (0..23) */
+#define BITMASK(i) ( (1<<(11-(i)%12+3)) << ((i)<12?16:0) )
+
+/*
+ * sb arrays:
+ *
+ * Workhorses of the inner loop of the DES implementation.
+ * They do sbox lookup, shifting of this value, 32 bit
+ * permutation and E permutation for the next round.
+ *
+ * Kept in 'BITMASK' format.
+ */
+
+#ifdef _UFC_32_
+long32 _ufc_sb0[8192], _ufc_sb1[8192], _ufc_sb2[8192], _ufc_sb3[8192];
+static long32 *sb[4] = {_ufc_sb0, _ufc_sb1, _ufc_sb2, _ufc_sb3};
+#endif
+
+#ifdef _UFC_64_
+long64 _ufc_sb0[4096], _ufc_sb1[4096], _ufc_sb2[4096], _ufc_sb3[4096];
+static long64 *sb[4] = {_ufc_sb0, _ufc_sb1, _ufc_sb2, _ufc_sb3};
+#endif
+
+/*
+ * eperm32tab: do 32 bit permutation and E selection
+ *
+ * The first index is the byte number in the 32 bit value to be permuted
+ * - second - is the value of this byte
+ * - third - selects the two 32 bit values
+ *
+ * The table is used and generated internally in init_des to speed it up
+ */
+static ufc_long eperm32tab[4][256][2];
+
+/*
+ * do_pc1: perform pc1 permutation in the key schedule generation.
+ *
+ * The first index is the byte number in the 8 byte ASCII key
+ * - second - - the two 28 bit halves of the result
+ * - third - selects the 7 bits actually used of each byte
+ *
+ * The result is kept with 28 bit per 32 bit with the 4 most significant
+ * bits zero.
+ */
+static ufc_long do_pc1[8][2][128];
+
+/*
+ * do_pc2: perform pc2 permutation in the key schedule generation.
+ *
+ * The first index is the septet number in the two 28 bit intermediate values
+ * - second - - - septet values
+ *
+ * Knowledge of the structure of the pc2 permutation is used.
+ *
+ * The result is kept with 28 bit per 32 bit with the 4 most significant
+ * bits zero.
+ */
+static ufc_long do_pc2[8][128];
+
+/*
+ * efp: undo an extra e selection and do final
+ * permutation giving the DES result.
+ *
+ * Invoked 6 bits at a time on two 48 bit values
+ * giving two 32 bit longs.
+ */
+static ufc_long efp[16][64][2];
+
+static unsigned char bytemask[8] = {
+ 0x80, 0x40, 0x20, 0x10, 0x08, 0x04, 0x02, 0x01
+};
+
+static ufc_long longmask[32] = {
+ 0x80000000, 0x40000000, 0x20000000, 0x10000000,
+ 0x08000000, 0x04000000, 0x02000000, 0x01000000,
+ 0x00800000, 0x00400000, 0x00200000, 0x00100000,
+ 0x00080000, 0x00040000, 0x00020000, 0x00010000,
+ 0x00008000, 0x00004000, 0x00002000, 0x00001000,
+ 0x00000800, 0x00000400, 0x00000200, 0x00000100,
+ 0x00000080, 0x00000040, 0x00000020, 0x00000010,
+ 0x00000008, 0x00000004, 0x00000002, 0x00000001
+};
+
+
+/*
+ * Silly rewrite of 'bzero'. I do so
+ * because some machines don't have
+ * bzero and some don't have memset.
+ */
+
+static void clearmem(char *start, int cnt)
+ { while(cnt--)
+ *start++ = '\0';
+ }
+
+static int initialized = 0;
+
+/* lookup a 6 bit value in sbox */
+
+#define s_lookup(i,s) sbox[(i)][(((s)>>4) & 0x2)|((s) & 0x1)][((s)>>1) & 0xf];
+
+/*
+ * Initialize unit - may be invoked directly
+ * by fcrypt users.
+ */
+
+static void ufc_init_des(void)
+ { int comes_from_bit;
+ int bit, sg;
+ ufc_long j;
+ ufc_long mask1, mask2;
+
+ /*
+ * Create the do_pc1 table used
+ * to affect pc1 permutation
+ * when generating keys
+ */
+ for(bit = 0; bit < 56; bit++) {
+ comes_from_bit = pc1[bit] - 1;
+ mask1 = bytemask[comes_from_bit % 8 + 1];
+ mask2 = longmask[bit % 28 + 4];
+ for(j = 0; j < 128; j++) {
+ if(j & mask1)
+ do_pc1[comes_from_bit / 8][bit / 28][j] |= mask2;
+ }
+ }
+
+ /*
+ * Create the do_pc2 table used
+ * to affect pc2 permutation when
+ * generating keys
+ */
+ for(bit = 0; bit < 48; bit++) {
+ comes_from_bit = pc2[bit] - 1;
+ mask1 = bytemask[comes_from_bit % 7 + 1];
+ mask2 = BITMASK(bit % 24);
+ for(j = 0; j < 128; j++) {
+ if(j & mask1)
+ do_pc2[comes_from_bit / 7][j] |= mask2;
+ }
+ }
+
+ /*
+ * Now generate the table used to do combined
+ * 32 bit permutation and e expansion
+ *
+ * We use it because we have to permute 16384 32 bit
+ * longs into 48 bit in order to initialize sb.
+ *
+ * Looping 48 rounds per permutation becomes
+ * just too slow...
+ *
+ */
+
+ clearmem((char*)eperm32tab, sizeof(eperm32tab));
+
+ for(bit = 0; bit < 48; bit++) {
+ ufc_long inner_mask1,comes_from;
+
+ comes_from = perm32[esel[bit]-1]-1;
+ inner_mask1 = bytemask[comes_from % 8];
+
+ for(j = 256; j--;) {
+ if(j & inner_mask1)
+ eperm32tab[comes_from / 8][j][bit / 24] |= BITMASK(bit % 24);
+ }
+ }
+
+ /*
+ * Create the sb tables:
+ *
+ * For each 12 bit segment of an 48 bit intermediate
+ * result, the sb table precomputes the two 4 bit
+ * values of the sbox lookups done with the two 6
+ * bit halves, shifts them to their proper place,
+ * sends them through perm32 and finally E expands
+ * them so that they are ready for the next
+ * DES round.
+ *
+ */
+ for(sg = 0; sg < 4; sg++) {
+ int j1, j2;
+ int s1, s2;
+
+ for(j1 = 0; j1 < 64; j1++) {
+ s1 = s_lookup(2 * sg, j1);
+ for(j2 = 0; j2 < 64; j2++) {
+ ufc_long to_permute, inx;
+
+ s2 = s_lookup(2 * sg + 1, j2);
+ to_permute = ((s1 << 4) | s2) << (24 - 8 * sg);
+
+#ifdef _UFC_32_
+ inx = ((j1 << 6) | j2) << 1;
+ sb[sg][inx ] = eperm32tab[0][(to_permute >> 24) & 0xff][0];
+ sb[sg][inx+1] = eperm32tab[0][(to_permute >> 24) & 0xff][1];
+ sb[sg][inx ] |= eperm32tab[1][(to_permute >> 16) & 0xff][0];
+ sb[sg][inx+1] |= eperm32tab[1][(to_permute >> 16) & 0xff][1];
+ sb[sg][inx ] |= eperm32tab[2][(to_permute >> 8) & 0xff][0];
+ sb[sg][inx+1] |= eperm32tab[2][(to_permute >> 8) & 0xff][1];
+ sb[sg][inx ] |= eperm32tab[3][(to_permute) & 0xff][0];
+ sb[sg][inx+1] |= eperm32tab[3][(to_permute) & 0xff][1];
+#endif
+#ifdef _UFC_64_
+ inx = ((j1 << 6) | j2);
+ sb[sg][inx] =
+ ((long64)eperm32tab[0][(to_permute >> 24) & 0xff][0] << 32) |
+ (long64)eperm32tab[0][(to_permute >> 24) & 0xff][1];
+ sb[sg][inx] |=
+ ((long64)eperm32tab[1][(to_permute >> 16) & 0xff][0] << 32) |
+ (long64)eperm32tab[1][(to_permute >> 16) & 0xff][1];
+ sb[sg][inx] |=
+ ((long64)eperm32tab[2][(to_permute >> 8) & 0xff][0] << 32) |
+ (long64)eperm32tab[2][(to_permute >> 8) & 0xff][1];
+ sb[sg][inx] |=
+ ((long64)eperm32tab[3][(to_permute) & 0xff][0] << 32) |
+ (long64)eperm32tab[3][(to_permute) & 0xff][1];
+#endif
+ }
+ }
+ }
+
+ /*
+ * Create an inverse matrix for esel telling
+ * where to plug out bits if undoing it
+ */
+ for(bit=48; bit--;) {
+ e_inverse[esel[bit] - 1 ] = bit;
+ e_inverse[esel[bit] - 1 + 32] = bit + 48;
+ }
+
+ /*
+ * create efp: the matrix used to
+ * undo the E expansion and effect final permutation
+ */
+ clearmem((char*)efp, sizeof efp);
+ for(bit = 0; bit < 64; bit++) {
+ int o_bit, o_long;
+ ufc_long word_value, inner_mask1, inner_mask2;
+ int comes_from_f_bit, comes_from_e_bit;
+ int comes_from_word, bit_within_word;
+
+ /* See where bit i belongs in the two 32 bit long's */
+ o_long = bit / 32; /* 0..1 */
+ o_bit = bit % 32; /* 0..31 */
+
+ /*
+ * And find a bit in the e permutated value setting this bit.
+ *
+ * Note: the e selection may have selected the same bit several
+ * times. By the initialization of e_inverse, we only look
+ * for one specific instance.
+ */
+ comes_from_f_bit = final_perm[bit] - 1; /* 0..63 */
+ comes_from_e_bit = e_inverse[comes_from_f_bit]; /* 0..95 */
+ comes_from_word = comes_from_e_bit / 6; /* 0..15 */
+ bit_within_word = comes_from_e_bit % 6; /* 0..5 */
+
+ inner_mask1 = longmask[bit_within_word + 26];
+ inner_mask2 = longmask[o_bit];
+
+ for(word_value = 64; word_value--;) {
+ if(word_value & inner_mask1)
+ efp[comes_from_word][word_value][o_long] |= inner_mask2;
+ }
+ }
+ initialized++;
+ }
+
+/*
+ * Process the elements of the sb table permuting the
+ * bits swapped in the expansion by the current salt.
+ */
+
+#ifdef _UFC_32_
+static void shuffle_sb(long32 *k, ufc_long saltbits)
+ { ufc_long j;
+ long32 x;
+ for(j=4096; j--;) {
+ x = (k[0] ^ k[1]) & (long32)saltbits;
+ *k++ ^= x;
+ *k++ ^= x;
+ }
+ }
+#endif
+
+#ifdef _UFC_64_
+static void shuffle_sb(long64 *k, ufc_long saltbits)
+ { ufc_long j;
+ long64 x;
+ for(j=4096; j--;) {
+ x = ((*k >> 32) ^ *k) & (long64)saltbits;
+ *k++ ^= (x << 32) | x;
+ }
+ }
+#endif
+
+/*
+ * Setup the unit for a new salt
+ * Hopefully we'll not see a new salt in each crypt call.
+ */
+
+static unsigned char current_salt[3] = "&&"; /* invalid value */
+static ufc_long current_saltbits = 0;
+static int direction = 0;
+
+static void setup_salt(const char *s1)
+ { ufc_long i, j, saltbits;
+ const unsigned char *s2 = (const unsigned char *)s1;
+
+ if(!initialized)
+ ufc_init_des();
+
+ if(s2[0] == current_salt[0] && s2[1] == current_salt[1])
+ return;
+ current_salt[0] = s2[0]; current_salt[1] = s2[1];
+
+ /*
+ * This is the only crypt change to DES:
+ * entries are swapped in the expansion table
+ * according to the bits set in the salt.
+ */
+ saltbits = 0;
+ for(i = 0; i < 2; i++) {
+ long c=ascii_to_bin(s2[i]);
+ if(c < 0 || c > 63)
+ c = 0;
+ for(j = 0; j < 6; j++) {
+ if((c >> j) & 0x1)
+ saltbits |= BITMASK(6 * i + j);
+ }
+ }
+
+ /*
+ * Permute the sb table values
+ * to reflect the changed e
+ * selection table
+ */
+ shuffle_sb(_ufc_sb0, current_saltbits ^ saltbits);
+ shuffle_sb(_ufc_sb1, current_saltbits ^ saltbits);
+ shuffle_sb(_ufc_sb2, current_saltbits ^ saltbits);
+ shuffle_sb(_ufc_sb3, current_saltbits ^ saltbits);
+
+ current_saltbits = saltbits;
+ }
+
+static void ufc_mk_keytab(char *key)
+ { ufc_long v1, v2, *k1;
+ int i;
+#ifdef _UFC_32_
+ long32 v, *k2 = &_ufc_keytab[0][0];
+#endif
+#ifdef _UFC_64_
+ long64 v, *k2 = &_ufc_keytab[0];
+#endif
+
+ v1 = v2 = 0; k1 = &do_pc1[0][0][0];
+ for(i = 8; i--;) {
+ v1 |= k1[*key & 0x7f]; k1 += 128;
+ v2 |= k1[*key++ & 0x7f]; k1 += 128;
+ }
+
+ for(i = 0; i < 16; i++) {
+ k1 = &do_pc2[0][0];
+
+ v1 = (v1 << rots[i]) | (v1 >> (28 - rots[i]));
+ v = k1[(v1 >> 21) & 0x7f]; k1 += 128;
+ v |= k1[(v1 >> 14) & 0x7f]; k1 += 128;
+ v |= k1[(v1 >> 7) & 0x7f]; k1 += 128;
+ v |= k1[(v1 ) & 0x7f]; k1 += 128;
+
+#ifdef _UFC_32_
+ *k2++ = v;
+ v = 0;
+#endif
+#ifdef _UFC_64_
+ v <<= 32;
+#endif
+
+ v2 = (v2 << rots[i]) | (v2 >> (28 - rots[i]));
+ v |= k1[(v2 >> 21) & 0x7f]; k1 += 128;
+ v |= k1[(v2 >> 14) & 0x7f]; k1 += 128;
+ v |= k1[(v2 >> 7) & 0x7f]; k1 += 128;
+ v |= k1[(v2 ) & 0x7f];
+
+ *k2++ = v;
+ }
+
+ direction = 0;
+ }
+
+/*
+ * Undo an extra E selection and do final permutations
+ */
+
+ufc_long *_ufc_dofinalperm(ufc_long l1, ufc_long l2, ufc_long r1, ufc_long r2)
+ { ufc_long v1, v2, x;
+ static ufc_long ary[2];
+
+ x = (l1 ^ l2) & current_saltbits; l1 ^= x; l2 ^= x;
+ x = (r1 ^ r2) & current_saltbits; r1 ^= x; r2 ^= x;
+
+ v1=v2=0; l1 >>= 3; l2 >>= 3; r1 >>= 3; r2 >>= 3;
+
+ v1 |= efp[15][ r2 & 0x3f][0]; v2 |= efp[15][ r2 & 0x3f][1];
+ v1 |= efp[14][(r2 >>= 6) & 0x3f][0]; v2 |= efp[14][ r2 & 0x3f][1];
+ v1 |= efp[13][(r2 >>= 10) & 0x3f][0]; v2 |= efp[13][ r2 & 0x3f][1];
+ v1 |= efp[12][(r2 >>= 6) & 0x3f][0]; v2 |= efp[12][ r2 & 0x3f][1];
+
+ v1 |= efp[11][ r1 & 0x3f][0]; v2 |= efp[11][ r1 & 0x3f][1];
+ v1 |= efp[10][(r1 >>= 6) & 0x3f][0]; v2 |= efp[10][ r1 & 0x3f][1];
+ v1 |= efp[ 9][(r1 >>= 10) & 0x3f][0]; v2 |= efp[ 9][ r1 & 0x3f][1];
+ v1 |= efp[ 8][(r1 >>= 6) & 0x3f][0]; v2 |= efp[ 8][ r1 & 0x3f][1];
+
+ v1 |= efp[ 7][ l2 & 0x3f][0]; v2 |= efp[ 7][ l2 & 0x3f][1];
+ v1 |= efp[ 6][(l2 >>= 6) & 0x3f][0]; v2 |= efp[ 6][ l2 & 0x3f][1];
+ v1 |= efp[ 5][(l2 >>= 10) & 0x3f][0]; v2 |= efp[ 5][ l2 & 0x3f][1];
+ v1 |= efp[ 4][(l2 >>= 6) & 0x3f][0]; v2 |= efp[ 4][ l2 & 0x3f][1];
+
+ v1 |= efp[ 3][ l1 & 0x3f][0]; v2 |= efp[ 3][ l1 & 0x3f][1];
+ v1 |= efp[ 2][(l1 >>= 6) & 0x3f][0]; v2 |= efp[ 2][ l1 & 0x3f][1];
+ v1 |= efp[ 1][(l1 >>= 10) & 0x3f][0]; v2 |= efp[ 1][ l1 & 0x3f][1];
+ v1 |= efp[ 0][(l1 >>= 6) & 0x3f][0]; v2 |= efp[ 0][ l1 & 0x3f][1];
+
+ ary[0] = v1; ary[1] = v2;
+ return ary;
+ }
+
+/*
+ * crypt only: convert from 64 bits to 11 ASCII characters,
+ * prefixing with the salt
+ */
+
+static char *output_conversion(ufc_long v1, ufc_long v2, const char *salt)
+ { static char outbuf[14];
+ int i, s;
+
+ outbuf[0] = salt[0];
+ outbuf[1] = salt[1] ? salt[1] : salt[0];
+
+ for(i = 0; i < 5; i++)
+ outbuf[i + 2] = bin_to_ascii((v1 >> (26 - 6 * i)) & 0x3f);
+
+ s = (v2 & 0xf) << 2;
+ v2 = (v2 >> 2) | ((v1 & 0x3) << 30);
+
+ for(i = 5; i < 10; i++)
+ outbuf[i + 2] = bin_to_ascii((v2 >> (56 - 6 * i)) & 0x3f);
+
+ outbuf[12] = bin_to_ascii(s);
+ outbuf[13] = 0;
+
+ return outbuf;
+ }
+
+/*
+ * UNIX crypt function
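+ *
+ * Typical use (an illustrative sketch): hash a key with a two-character
+ * salt. The result points to a static buffer, so it must be copied if it
+ * needs to survive the next call.
+ *
+ *	hash = ufc_crypt("secret", "ab");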
+ */
+
+static ufc_long *_ufc_doit(ufc_long , ufc_long, ufc_long, ufc_long, ufc_long);
+
+char *ufc_crypt(const char *key,const char *salt)
+ { ufc_long *s;
+ char ktab[9];
+
+ /*
+ * Hack DES tables according to salt
+ */
+ setup_salt(salt);
+
+ /*
+ * Setup key schedule
+ */
+ clearmem(ktab, sizeof ktab);
+ strncpy(ktab, key, 8);
+ ufc_mk_keytab(ktab);
+
+ /*
+ * Go for the 25 DES encryptions
+ */
+ s = _ufc_doit((ufc_long)0, (ufc_long)0,
+ (ufc_long)0, (ufc_long)0, (ufc_long)25);
+
+ /*
+ * And convert back to 6 bit ASCII
+ */
+ return output_conversion(s[0], s[1], salt);
+ }
+
+
+#ifdef _UFC_32_
+
+/*
+ * 32 bit version
+ */
+
+extern long32 _ufc_keytab[16][2];
+extern long32 _ufc_sb0[], _ufc_sb1[], _ufc_sb2[], _ufc_sb3[];
+
+#define SBA(sb, v) (*(long32*)((char*)(sb)+(v)))
+
+static ufc_long *_ufc_doit(ufc_long l1, ufc_long l2, ufc_long r1, ufc_long r2, ufc_long itr)
+ { int i;
+ long32 s, *k;
+
+ while(itr--) {
+ k = &_ufc_keytab[0][0];
+ for(i=8; i--; ) {
+ s = *k++ ^ r1;
+ l1 ^= SBA(_ufc_sb1, s & 0xffff); l2 ^= SBA(_ufc_sb1, (s & 0xffff)+4);
+ l1 ^= SBA(_ufc_sb0, s >>= 16); l2 ^= SBA(_ufc_sb0, (s) +4);
+ s = *k++ ^ r2;
+ l1 ^= SBA(_ufc_sb3, s & 0xffff); l2 ^= SBA(_ufc_sb3, (s & 0xffff)+4);
+ l1 ^= SBA(_ufc_sb2, s >>= 16); l2 ^= SBA(_ufc_sb2, (s) +4);
+
+ s = *k++ ^ l1;
+ r1 ^= SBA(_ufc_sb1, s & 0xffff); r2 ^= SBA(_ufc_sb1, (s & 0xffff)+4);
+ r1 ^= SBA(_ufc_sb0, s >>= 16); r2 ^= SBA(_ufc_sb0, (s) +4);
+ s = *k++ ^ l2;
+ r1 ^= SBA(_ufc_sb3, s & 0xffff); r2 ^= SBA(_ufc_sb3, (s & 0xffff)+4);
+ r1 ^= SBA(_ufc_sb2, s >>= 16); r2 ^= SBA(_ufc_sb2, (s) +4);
+ }
+ s=l1; l1=r1; r1=s; s=l2; l2=r2; r2=s;
+ }
+ return _ufc_dofinalperm(l1, l2, r1, r2);
+ }
+
+#endif
+
+#ifdef _UFC_64_
+
+/*
+ * 64 bit version
+ */
+
+extern long64 _ufc_keytab[16];
+extern long64 _ufc_sb0[], _ufc_sb1[], _ufc_sb2[], _ufc_sb3[];
+
+#define SBA(sb, v) (*(long64*)((char*)(sb)+(v)))
+
+static ufc_long *_ufc_doit(ufc_long l1, ufc_long l2, ufc_long r1, ufc_long r2, ufc_long itr)
+ { int i;
+ long64 l, r, s, *k;
+
+ l = (((long64)l1) << 32) | ((long64)l2);
+ r = (((long64)r1) << 32) | ((long64)r2);
+
+ while(itr--) {
+ k = &_ufc_keytab[0];
+ for(i=8; i--; ) {
+ s = *k++ ^ r;
+ l ^= SBA(_ufc_sb3, (s >> 0) & 0xffff);
+ l ^= SBA(_ufc_sb2, (s >> 16) & 0xffff);
+ l ^= SBA(_ufc_sb1, (s >> 32) & 0xffff);
+ l ^= SBA(_ufc_sb0, (s >> 48) & 0xffff);
+
+ s = *k++ ^ l;
+ r ^= SBA(_ufc_sb3, (s >> 0) & 0xffff);
+ r ^= SBA(_ufc_sb2, (s >> 16) & 0xffff);
+ r ^= SBA(_ufc_sb1, (s >> 32) & 0xffff);
+ r ^= SBA(_ufc_sb0, (s >> 48) & 0xffff);
+ }
+ s=l; l=r; r=s;
+ }
+
+ l1 = l >> 32; l2 = l & 0xffffffff;
+ r1 = r >> 32; r2 = r & 0xffffffff;
+ return _ufc_dofinalperm(l1, l2, r1, r2);
+ }
+
+#endif
+
+
+#else
+ int ufc_dummy_procedure(void);
+ int ufc_dummy_procedure(void) {return 0;}
+#endif
diff --git a/lib/replace/cwrap.c b/lib/replace/cwrap.c
new file mode 100644
index 0000000..adc5c1e
--- /dev/null
+++ b/lib/replace/cwrap.c
@@ -0,0 +1,46 @@
+/*
+ * Unix SMB/CIFS implementation.
+ *
+ * Replaceable functions by cwrap
+ *
+ * Copyright (c) 2014 Andreas Schneider <asn@samba.org>
+ *
+ * ** NOTE! The following LGPL license applies to the replace
+ * ** library. This does NOT imply that all of Samba is released
+ * ** under the LGPL
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 3 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include "replace.h"
+
+bool nss_wrapper_enabled(void)
+{
+ return false;
+}
+
+bool nss_wrapper_hosts_enabled(void)
+{
+ return false;
+}
+
+bool socket_wrapper_enabled(void)
+{
+ return false;
+}
+
+bool uid_wrapper_enabled(void)
+{
+ return false;
+}
diff --git a/lib/replace/dlfcn.c b/lib/replace/dlfcn.c
new file mode 100644
index 0000000..88431ed
--- /dev/null
+++ b/lib/replace/dlfcn.c
@@ -0,0 +1,76 @@
+/*
+ Unix SMB/CIFS implementation.
+ Samba system utilities
+ Copyright (C) Andrew Tridgell 1992-1998
+ Copyright (C) Jeremy Allison 1998-2002
+ Copyright (C) Jelmer Vernooij 2006
+
+ ** NOTE! The following LGPL license applies to the replace
+ ** library. This does NOT imply that all of Samba is released
+ ** under the LGPL
+
+ This library is free software; you can redistribute it and/or
+ modify it under the terms of the GNU Lesser General Public
+ License as published by the Free Software Foundation; either
+ version 3 of the License, or (at your option) any later version.
+
+ This library is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ Lesser General Public License for more details.
+
+ You should have received a copy of the GNU Lesser General Public
+ License along with this library; if not, see <http://www.gnu.org/licenses/>.
+*/
+
+#include "replace.h"
+#ifdef HAVE_DL_H
+#include <dl.h>
+#endif
+
+#ifndef HAVE_DLOPEN
+#ifdef DLOPEN_TAKES_UNSIGNED_FLAGS
+void *rep_dlopen(const char *name, unsigned int flags)
+#else
+void *rep_dlopen(const char *name, int flags)
+#endif
+{
+#ifdef HAVE_SHL_LOAD
+ if (name == NULL)
+ return PROG_HANDLE;
+ return (void *)shl_load(name, flags, 0);
+#else
+ return NULL;
+#endif
+}
+#endif
+
+#ifndef HAVE_DLSYM
+void *rep_dlsym(void *handle, const char *symbol)
+{
+#ifdef HAVE_SHL_FINDSYM
+ void *sym_addr;
+ if (!shl_findsym((shl_t *)&handle, symbol, TYPE_UNDEFINED, &sym_addr))
+ return sym_addr;
+#endif
+ return NULL;
+}
+#endif
+
+#ifndef HAVE_DLERROR
+char *rep_dlerror(void)
+{
+ return "dynamic loading of objects not supported on this platform";
+}
+#endif
+
+#ifndef HAVE_DLCLOSE
+int rep_dlclose(void *handle)
+{
+#ifdef HAVE_SHL_CLOSE
+ return shl_unload((shl_t)handle);
+#else
+ return 0;
+#endif
+}
+#endif
diff --git a/lib/replace/getaddrinfo.c b/lib/replace/getaddrinfo.c
new file mode 100644
index 0000000..8440d8e
--- /dev/null
+++ b/lib/replace/getaddrinfo.c
@@ -0,0 +1,493 @@
+/*
+PostgreSQL Database Management System
+(formerly known as Postgres, then as Postgres95)
+
+Portions Copyright (c) 1996-2005, The PostgreSQL Global Development Group
+
+Portions Copyright (c) 1994, The Regents of the University of California
+
+Permission to use, copy, modify, and distribute this software and its
+documentation for any purpose, without fee, and without a written agreement
+is hereby granted, provided that the above copyright notice and this paragraph
+and the following two paragraphs appear in all copies.
+
+IN NO EVENT SHALL THE UNIVERSITY OF CALIFORNIA BE LIABLE TO ANY PARTY FOR
+DIRECT, INDIRECT, SPECIAL, INCIDENTAL, OR CONSEQUENTIAL DAMAGES, INCLUDING
+LOST PROFITS, ARISING OUT OF THE USE OF THIS SOFTWARE AND ITS DOCUMENTATION,
+EVEN IF THE UNIVERSITY OF CALIFORNIA HAS BEEN ADVISED OF THE POSSIBILITY OF
+SUCH DAMAGE.
+
+THE UNIVERSITY OF CALIFORNIA SPECIFICALLY DISCLAIMS ANY WARRANTIES,
+INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY
+AND FITNESS FOR A PARTICULAR PURPOSE. THE SOFTWARE PROVIDED HEREUNDER IS
+ON AN "AS IS" BASIS, AND THE UNIVERSITY OF CALIFORNIA HAS NO OBLIGATIONS
+TO PROVIDE MAINTENANCE, SUPPORT, UPDATES, ENHANCEMENTS, OR MODIFICATIONS.
+
+*/
+
+/*-------------------------------------------------------------------------
+ *
+ * getaddrinfo.c
+ * Support getaddrinfo() on platforms that don't have it.
+ *
+ * We also supply getnameinfo() here, assuming that the platform will have
+ * it if and only if it has getaddrinfo(). If this proves false on some
+ * platform, we'll need to split this file and provide a separate configure
+ * test for getnameinfo().
+ *
+ * Copyright (c) 2003-2007, PostgreSQL Global Development Group
+ *
+ * Copyright (C) 2007 Jeremy Allison.
+ * Modified to return multiple IPv4 addresses for Samba.
+ *
+ *-------------------------------------------------------------------------
+ */
+
+#include "replace.h"
+#include "system/network.h"
+
+#ifndef SMB_MALLOC
+#define SMB_MALLOC(s) malloc(s)
+#endif
+
+#ifndef SMB_STRDUP
+#define SMB_STRDUP(s) strdup(s)
+#endif
+
+static int check_hostent_err(struct hostent *hp)
+{
+ if (!hp) {
+ switch (h_errno) {
+ case HOST_NOT_FOUND:
+ case NO_DATA:
+ return EAI_NONAME;
+ case TRY_AGAIN:
+ return EAI_AGAIN;
+ case NO_RECOVERY:
+ default:
+ return EAI_FAIL;
+ }
+ }
+ if (!hp->h_name || hp->h_addrtype != AF_INET) {
+ return EAI_FAIL;
+ }
+ return 0;
+}
+
+static char *canon_name_from_hostent(struct hostent *hp,
+ int *perr)
+{
+ char *ret = NULL;
+
+ *perr = check_hostent_err(hp);
+ if (*perr) {
+ return NULL;
+ }
+ ret = SMB_STRDUP(hp->h_name);
+ if (!ret) {
+ *perr = EAI_MEMORY;
+ }
+ return ret;
+}
+
+static char *get_my_canon_name(int *perr)
+{
+ char name[HOST_NAME_MAX+1];
+
+ if (gethostname(name, HOST_NAME_MAX) == -1) {
+ *perr = EAI_FAIL;
+ return NULL;
+ }
+ /* Ensure null termination. */
+ name[HOST_NAME_MAX] = '\0';
+ return canon_name_from_hostent(gethostbyname(name), perr);
+}
+
+static char *get_canon_name_from_addr(struct in_addr ip,
+ int *perr)
+{
+ return canon_name_from_hostent(
+ gethostbyaddr(&ip, sizeof(ip), AF_INET),
+ perr);
+}
+
+static struct addrinfo *alloc_entry(const struct addrinfo *hints,
+ struct in_addr ip,
+ unsigned short port)
+{
+ struct sockaddr_in *psin = NULL;
+ struct addrinfo *ai = SMB_MALLOC(sizeof(*ai));
+
+ if (!ai) {
+ return NULL;
+ }
+ memset(ai, '\0', sizeof(*ai));
+
+ psin = SMB_MALLOC(sizeof(*psin));
+ if (!psin) {
+ free(ai);
+ return NULL;
+ }
+
+ memset(psin, '\0', sizeof(*psin));
+
+ psin->sin_family = AF_INET;
+ psin->sin_port = htons(port);
+ psin->sin_addr = ip;
+
+ ai->ai_flags = 0;
+ ai->ai_family = AF_INET;
+ ai->ai_socktype = hints->ai_socktype;
+ ai->ai_protocol = hints->ai_protocol;
+ ai->ai_addrlen = sizeof(*psin);
+ ai->ai_addr = (struct sockaddr *) psin;
+ ai->ai_canonname = NULL;
+ ai->ai_next = NULL;
+
+ return ai;
+}
+
+/*
+ * get address info for a single ipv4 address.
+ *
+ * Bugs: - servname can only be a number, not text.
+ */
+
+static int getaddr_info_single_addr(const char *service,
+ uint32_t addr,
+ const struct addrinfo *hints,
+ struct addrinfo **res)
+{
+
+ struct addrinfo *ai = NULL;
+ struct in_addr ip;
+ unsigned short port = 0;
+
+ if (service) {
+ port = (unsigned short)atoi(service);
+ }
+ ip.s_addr = htonl(addr);
+
+ ai = alloc_entry(hints, ip, port);
+ if (!ai) {
+ return EAI_MEMORY;
+ }
+
+ /* If we're asked for the canonical name,
+ * make sure it returns correctly. */
+ if (!(hints->ai_flags & AI_NUMERICSERV) &&
+ hints->ai_flags & AI_CANONNAME) {
+ int err;
+ if (addr == INADDR_LOOPBACK || addr == INADDR_ANY) {
+ ai->ai_canonname = get_my_canon_name(&err);
+ } else {
+ ai->ai_canonname =
+ get_canon_name_from_addr(ip,&err);
+ }
+ if (ai->ai_canonname == NULL) {
+ freeaddrinfo(ai);
+ return err;
+ }
+ }
+
+ *res = ai;
+ return 0;
+}
+
+/*
+ * get address info for multiple ipv4 addresses.
+ *
+ * Bugs: - servname can only be a number, not text.
+ */
+
+static int getaddr_info_name(const char *node,
+ const char *service,
+ const struct addrinfo *hints,
+ struct addrinfo **res)
+{
+ struct addrinfo *listp = NULL, *prevp = NULL;
+ char **pptr = NULL;
+ int err;
+ struct hostent *hp = NULL;
+ unsigned short port = 0;
+
+ if (service) {
+ port = (unsigned short)atoi(service);
+ }
+
+ hp = gethostbyname(node);
+ err = check_hostent_err(hp);
+ if (err) {
+ return err;
+ }
+
+ for(pptr = hp->h_addr_list; *pptr; pptr++) {
+ struct in_addr ip = *(struct in_addr *)*pptr;
+ struct addrinfo *ai = alloc_entry(hints, ip, port);
+
+ if (!ai) {
+ freeaddrinfo(listp);
+ return EAI_MEMORY;
+ }
+
+ if (!listp) {
+ listp = ai;
+ prevp = ai;
+ ai->ai_canonname = SMB_STRDUP(hp->h_name);
+ if (!ai->ai_canonname) {
+ freeaddrinfo(listp);
+ return EAI_MEMORY;
+ }
+ } else {
+ prevp->ai_next = ai;
+ prevp = ai;
+ }
+ }
+ *res = listp;
+ return 0;
+}
+
+/*
+ * get address info for ipv4 sockets.
+ *
+ * Bugs: - servname can only be a number, not text.
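+ *
+ * Typical use follows the standard getaddrinfo() contract, restricted here
+ * to IPv4 (an illustrative sketch; note the numeric service string):
+ *
+ *	struct addrinfo hints, *res = NULL;
+ *
+ *	memset(&hints, 0, sizeof(hints));
+ *	hints.ai_family = AF_INET;
+ *	hints.ai_socktype = SOCK_STREAM;
+ *	if (getaddrinfo("localhost", "445", &hints, &res) == 0) {
+ *		// connect using res->ai_addr and res->ai_addrlen
+ *		freeaddrinfo(res);
+ *	}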
+ */
+
+int rep_getaddrinfo(const char *node,
+ const char *service,
+ const struct addrinfo * hintp,
+ struct addrinfo ** res)
+{
+ struct addrinfo hints;
+
+ /* Setup the hints struct. */
+ if (hintp == NULL) {
+ memset(&hints, 0, sizeof(hints));
+ hints.ai_family = AF_INET;
+ hints.ai_socktype = SOCK_STREAM;
+ } else {
+ memcpy(&hints, hintp, sizeof(hints));
+ }
+
+ if (hints.ai_family != AF_INET && hints.ai_family != AF_UNSPEC) {
+ return EAI_FAMILY;
+ }
+
+ if (hints.ai_socktype == 0) {
+ hints.ai_socktype = SOCK_STREAM;
+ }
+
+ if (!node && !service) {
+ return EAI_NONAME;
+ }
+
+ if (node) {
+ if (node[0] == '\0') {
+ return getaddr_info_single_addr(service,
+ INADDR_ANY,
+ &hints,
+ res);
+ } else if (hints.ai_flags & AI_NUMERICHOST) {
+ struct in_addr ip;
+ if (!inet_aton(node, &ip)) {
+ return EAI_FAIL;
+ }
+ return getaddr_info_single_addr(service,
+ ntohl(ip.s_addr),
+ &hints,
+ res);
+ } else {
+ return getaddr_info_name(node,
+ service,
+ &hints,
+ res);
+ }
+ } else if (hints.ai_flags & AI_PASSIVE) {
+ return getaddr_info_single_addr(service,
+ INADDR_ANY,
+ &hints,
+ res);
+ }
+ return getaddr_info_single_addr(service,
+ INADDR_LOOPBACK,
+ &hints,
+ res);
+}
+
+
+void rep_freeaddrinfo(struct addrinfo *res)
+{
+ struct addrinfo *next = NULL;
+
+ for (;res; res = next) {
+ next = res->ai_next;
+ free(res->ai_canonname);
+ free(res->ai_addr);
+ free(res);
+ }
+}
+
+
+const char *rep_gai_strerror(int errcode)
+{
+#ifdef HAVE_HSTRERROR
+ int hcode;
+
+ switch (errcode)
+ {
+ case EAI_NONAME:
+ hcode = HOST_NOT_FOUND;
+ break;
+ case EAI_AGAIN:
+ hcode = TRY_AGAIN;
+ break;
+ case EAI_FAIL:
+ default:
+ hcode = NO_RECOVERY;
+ break;
+ }
+
+ return hstrerror(hcode);
+#else /* !HAVE_HSTRERROR */
+
+ switch (errcode)
+ {
+ case EAI_NONAME:
+ return "Unknown host";
+ case EAI_AGAIN:
+ return "Host name lookup failure";
+#ifdef EAI_BADFLAGS
+ case EAI_BADFLAGS:
+ return "Invalid argument";
+#endif
+#ifdef EAI_FAMILY
+ case EAI_FAMILY:
+ return "Address family not supported";
+#endif
+#ifdef EAI_MEMORY
+ case EAI_MEMORY:
+ return "Not enough memory";
+#endif
+#ifdef EAI_NODATA
+ case EAI_NODATA:
+ return "No host data of that type was found";
+#endif
+#ifdef EAI_SERVICE
+ case EAI_SERVICE:
+ return "Class type not found";
+#endif
+#ifdef EAI_SOCKTYPE
+ case EAI_SOCKTYPE:
+ return "Socket type not supported";
+#endif
+ default:
+ return "Unknown server error";
+ }
+#endif /* HAVE_HSTRERROR */
+}
+
+static int gethostnameinfo(const struct sockaddr *sa,
+ char *node,
+ size_t nodelen,
+ int flags)
+{
+ int ret = -1;
+ char *p = NULL;
+
+ if (!(flags & NI_NUMERICHOST)) {
+ struct hostent *hp = gethostbyaddr(
+ &((struct sockaddr_in *)sa)->sin_addr,
+ sizeof(struct in_addr),
+ sa->sa_family);
+ ret = check_hostent_err(hp);
+ if (ret == 0) {
+ /* Name looked up successfully. */
+ ret = snprintf(node, nodelen, "%s", hp->h_name);
+ if (ret < 0 || (size_t)ret >= nodelen) {
+ return EAI_MEMORY;
+ }
+ if (flags & NI_NOFQDN) {
+ p = strchr(node,'.');
+ if (p) {
+ *p = '\0';
+ }
+ }
+ return 0;
+ }
+
+ if (flags & NI_NAMEREQD) {
+ /* If we require a name and didn't get one,
+ * automatically fail. */
+ return ret;
+ }
+ /* Otherwise just fall into the numeric host code... */
+ }
+ p = inet_ntoa(((struct sockaddr_in *)sa)->sin_addr);
+ ret = snprintf(node, nodelen, "%s", p);
+ if (ret < 0 || (size_t)ret >= nodelen) {
+ return EAI_MEMORY;
+ }
+ return 0;
+}
+
+static int getservicenameinfo(const struct sockaddr *sa,
+ char *service,
+ size_t servicelen,
+ int flags)
+{
+ int ret = -1;
+ int port = ntohs(((struct sockaddr_in *)sa)->sin_port);
+
+ if (!(flags & NI_NUMERICSERV)) {
+ struct servent *se = getservbyport(
+ port,
+ (flags & NI_DGRAM) ? "udp" : "tcp");
+ if (se && se->s_name) {
+ /* Service name looked up successfully. */
+ ret = snprintf(service, servicelen, "%s", se->s_name);
+ if (ret < 0 || (size_t)ret >= servicelen) {
+ return EAI_MEMORY;
+ }
+ return 0;
+ }
+ /* Otherwise just fall into the numeric service code... */
+ }
+ ret = snprintf(service, servicelen, "%d", port);
+ if (ret < 0 || (size_t)ret >= servicelen) {
+ return EAI_MEMORY;
+ }
+ return 0;
+}
+
+/*
+ * Convert an ipv4 address to a hostname.
+ *
+ * Bugs: - No IPv6 support.
+ */
+int rep_getnameinfo(const struct sockaddr *sa, socklen_t salen,
+ char *node, size_t nodelen,
+ char *service, size_t servicelen, int flags)
+{
+
+ /* Invalid arguments. */
+ if (sa == NULL || (node == NULL && service == NULL)) {
+ return EAI_FAIL;
+ }
+
+ if (sa->sa_family != AF_INET) {
+ return EAI_FAIL;
+ }
+
+ if (salen < sizeof(struct sockaddr_in)) {
+ return EAI_FAIL;
+ }
+
+ if (node) {
+ return gethostnameinfo(sa, node, nodelen, flags);
+ }
+
+ if (service) {
+ return getservicenameinfo(sa, service, servicelen, flags);
+ }
+ return 0;
+}
diff --git a/lib/replace/getaddrinfo.h b/lib/replace/getaddrinfo.h
new file mode 100644
index 0000000..cf040da
--- /dev/null
+++ b/lib/replace/getaddrinfo.h
@@ -0,0 +1,91 @@
+/*
+PostgreSQL Database Management System
+(formerly known as Postgres, then as Postgres95)
+
+Portions Copyright (c) 1996-2005, The PostgreSQL Global Development Group
+
+Portions Copyright (c) 1994, The Regents of the University of California
+
+Permission to use, copy, modify, and distribute this software and its
+documentation for any purpose, without fee, and without a written agreement
+is hereby granted, provided that the above copyright notice and this paragraph
+and the following two paragraphs appear in all copies.
+
+IN NO EVENT SHALL THE UNIVERSITY OF CALIFORNIA BE LIABLE TO ANY PARTY FOR
+DIRECT, INDIRECT, SPECIAL, INCIDENTAL, OR CONSEQUENTIAL DAMAGES, INCLUDING
+LOST PROFITS, ARISING OUT OF THE USE OF THIS SOFTWARE AND ITS DOCUMENTATION,
+EVEN IF THE UNIVERSITY OF CALIFORNIA HAS BEEN ADVISED OF THE POSSIBILITY OF
+SUCH DAMAGE.
+
+THE UNIVERSITY OF CALIFORNIA SPECIFICALLY DISCLAIMS ANY WARRANTIES,
+INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY
+AND FITNESS FOR A PARTICULAR PURPOSE. THE SOFTWARE PROVIDED HEREUNDER IS
+ON AN "AS IS" BASIS, AND THE UNIVERSITY OF CALIFORNIA HAS NO OBLIGATIONS
+TO PROVIDE MAINTENANCE, SUPPORT, UPDATES, ENHANCEMENTS, OR MODIFICATIONS.
+
+*/
+
+/*-------------------------------------------------------------------------
+ *
+ * getaddrinfo.h
+ * Support getaddrinfo() on platforms that don't have it.
+ *
+ * Note: we use our own routines on platforms that don't HAVE_STRUCT_ADDRINFO,
+ * whether or not the library routine getaddrinfo() can be found. This
+ * policy is needed because on some platforms a manually installed libbind.a
+ * may provide getaddrinfo(), yet the system headers may not provide the
+ * struct definitions needed to call it. To avoid conflict with the libbind
+ * definition in such cases, we rename our routines to pg_xxx() via macros.
+ *
+ * In lib/replace we use rep_xxx() instead.
+ *
+ * This code will also work on platforms where struct addrinfo is defined
+ * in the system headers but no getaddrinfo() can be located.
+ *
+ * Copyright (c) 2003-2007, PostgreSQL Global Development Group
+ *
+ *-------------------------------------------------------------------------
+ */
+#ifndef GETADDRINFO_H
+#define GETADDRINFO_H
+
+#ifndef HAVE_GETADDRINFO
+
+/* Rename private copies per comments above */
+#ifdef getaddrinfo
+#undef getaddrinfo
+#endif
+#define getaddrinfo rep_getaddrinfo
+#define HAVE_GETADDRINFO
+
+#ifdef freeaddrinfo
+#undef freeaddrinfo
+#endif
+#define freeaddrinfo rep_freeaddrinfo
+#define HAVE_FREEADDRINFO
+
+#ifdef gai_strerror
+#undef gai_strerror
+#endif
+#define gai_strerror rep_gai_strerror
+#define HAVE_GAI_STRERROR
+
+#ifdef getnameinfo
+#undef getnameinfo
+#endif
+#define getnameinfo rep_getnameinfo
+#ifndef HAVE_GETNAMEINFO
+#define HAVE_GETNAMEINFO
+#endif
+
+extern int rep_getaddrinfo(const char *node, const char *service,
+ const struct addrinfo * hints, struct addrinfo ** res);
+extern void rep_freeaddrinfo(struct addrinfo * res);
+extern const char *rep_gai_strerror(int errcode);
+extern int rep_getnameinfo(const struct sockaddr * sa, socklen_t salen,
+ char *node, size_t nodelen,
+ char *service, size_t servicelen, int flags);
+#endif /* HAVE_GETADDRINFO */
+
+#endif /* GETADDRINFO_H */
diff --git a/lib/replace/getifaddrs.c b/lib/replace/getifaddrs.c
new file mode 100644
index 0000000..c2d20f8
--- /dev/null
+++ b/lib/replace/getifaddrs.c
@@ -0,0 +1,383 @@
+/*
+ Unix SMB/CIFS implementation.
+ Samba utility functions
+ Copyright (C) Andrew Tridgell 1998
+ Copyright (C) Jeremy Allison 2007
+ Copyright (C) Jelmer Vernooij <jelmer@samba.org> 2007
+
+ ** NOTE! The following LGPL license applies to the replace
+ ** library. This does NOT imply that all of Samba is released
+ ** under the LGPL
+
+ This library is free software; you can redistribute it and/or
+ modify it under the terms of the GNU Lesser General Public
+ License as published by the Free Software Foundation; either
+ version 3 of the License, or (at your option) any later version.
+
+ This library is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ Library General Public License for more details.
+
+ You should have received a copy of the GNU Lesser General Public
+ License along with this library; if not, see <http://www.gnu.org/licenses/>.
+*/
+
+#include "replace.h"
+#include "system/network.h"
+
+#include <unistd.h>
+#include <stdio.h>
+#include <sys/types.h>
+
+#ifdef HAVE_SYS_TIME_H
+#include <sys/time.h>
+#endif
+
+#ifndef SIOCGIFCONF
+#ifdef HAVE_SYS_SOCKIO_H
+#include <sys/sockio.h>
+#endif
+#endif
+
+#ifdef HAVE_IFACE_GETIFADDRS
+#define _FOUND_IFACE_ANY
+#else
+
+void rep_freeifaddrs(struct ifaddrs *ifp)
+{
+ if (ifp != NULL) {
+ free(ifp->ifa_name);
+ free(ifp->ifa_addr);
+ free(ifp->ifa_netmask);
+ free(ifp->ifa_dstaddr);
+ freeifaddrs(ifp->ifa_next);
+ free(ifp);
+ }
+}
+
+static struct sockaddr *sockaddr_dup(struct sockaddr *sa)
+{
+ struct sockaddr *ret;
+ socklen_t socklen;
+#ifdef HAVE_SOCKADDR_SA_LEN
+ socklen = sa->sa_len;
+#else
+ socklen = sizeof(struct sockaddr_storage);
+#endif
+ ret = calloc(1, socklen);
+ if (ret == NULL)
+ return NULL;
+ memcpy(ret, sa, socklen);
+ return ret;
+}
+#endif
+
+#ifdef HAVE_IFACE_IFCONF
+
+/* this works for Linux 2.2, Solaris 2.5, SunOS4, HPUX 10.20, OSF1
+ V4.0, Ultrix 4.4, SCO Unix 3.2, IRIX 6.4 and FreeBSD 3.2.
+
+ It probably also works on any BSD style system. */
+
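+/* Typical use (an illustrative sketch): enumerate the interfaces and
+ * release the list with freeifaddrs() when done.
+ *
+ *	struct ifaddrs *ifp = NULL, *cur;
+ *
+ *	if (getifaddrs(&ifp) == 0) {
+ *		for (cur = ifp; cur != NULL; cur = cur->ifa_next) {
+ *			// inspect cur->ifa_name, cur->ifa_addr, cur->ifa_flags
+ *		}
+ *		freeifaddrs(ifp);
+ *	}
+ */
+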
+int rep_getifaddrs(struct ifaddrs **ifap)
+{
+ struct ifconf ifc;
+ char buff[8192];
+ int fd, i, n;
+ struct ifreq *ifr=NULL;
+ struct ifaddrs *curif;
+ struct ifaddrs *lastif = NULL;
+
+ *ifap = NULL;
+
+ if ((fd = socket(AF_INET, SOCK_DGRAM, 0)) == -1) {
+ return -1;
+ }
+
+ ifc.ifc_len = sizeof(buff);
+ ifc.ifc_buf = buff;
+
+ if (ioctl(fd, SIOCGIFCONF, &ifc) != 0) {
+ close(fd);
+ return -1;
+ }
+
+ ifr = ifc.ifc_req;
+
+ n = ifc.ifc_len / sizeof(struct ifreq);
+
+	/* Loop through the interfaces */
+ for (i=n-1; i>=0; i--) {
+ if (ioctl(fd, SIOCGIFFLAGS, &ifr[i]) == -1) {
+ freeifaddrs(*ifap);
+ close(fd);
+ return -1;
+ }
+
+ curif = calloc(1, sizeof(struct ifaddrs));
+ if (curif == NULL) {
+ freeifaddrs(*ifap);
+ close(fd);
+ return -1;
+ }
+ curif->ifa_name = strdup(ifr[i].ifr_name);
+ if (curif->ifa_name == NULL) {
+ free(curif);
+ freeifaddrs(*ifap);
+ close(fd);
+ return -1;
+ }
+ curif->ifa_flags = ifr[i].ifr_flags;
+ curif->ifa_dstaddr = NULL;
+ curif->ifa_data = NULL;
+ curif->ifa_next = NULL;
+
+ curif->ifa_addr = NULL;
+ if (ioctl(fd, SIOCGIFADDR, &ifr[i]) != -1) {
+ curif->ifa_addr = sockaddr_dup(&ifr[i].ifr_addr);
+ if (curif->ifa_addr == NULL) {
+ free(curif->ifa_name);
+ free(curif);
+ freeifaddrs(*ifap);
+ close(fd);
+ return -1;
+ }
+ }
+
+ curif->ifa_netmask = NULL;
+ if (ioctl(fd, SIOCGIFNETMASK, &ifr[i]) != -1) {
+ curif->ifa_netmask = sockaddr_dup(&ifr[i].ifr_addr);
+ if (curif->ifa_netmask == NULL) {
+ if (curif->ifa_addr != NULL) {
+ free(curif->ifa_addr);
+ }
+ free(curif->ifa_name);
+ free(curif);
+ freeifaddrs(*ifap);
+ close(fd);
+ return -1;
+ }
+ }
+
+ if (lastif == NULL) {
+ *ifap = curif;
+ } else {
+ lastif->ifa_next = curif;
+ }
+ lastif = curif;
+ }
+
+ close(fd);
+
+ return 0;
+}
+
+#define _FOUND_IFACE_ANY
+#endif /* HAVE_IFACE_IFCONF */
+#ifdef HAVE_IFACE_IFREQ
+
+#ifndef I_STR
+#include <sys/stropts.h>
+#endif
+
+/****************************************************************************
+this should cover most of the streams-based systems
+Thanks to Andrej.Borsenkow@mow.siemens.ru for several ideas in this code
+****************************************************************************/
+int rep_getifaddrs(struct ifaddrs **ifap)
+{
+ struct ifreq ifreq;
+ struct strioctl strioctl;
+ char buff[8192];
+ int fd, i, n;
+ struct ifreq *ifr=NULL;
+ struct ifaddrs *curif;
+ struct ifaddrs *lastif = NULL;
+
+ *ifap = NULL;
+
+ if ((fd = socket(AF_INET, SOCK_DGRAM, 0)) == -1) {
+ return -1;
+ }
+
+ strioctl.ic_cmd = SIOCGIFCONF;
+ strioctl.ic_dp = buff;
+ strioctl.ic_len = sizeof(buff);
+ if (ioctl(fd, I_STR, &strioctl) < 0) {
+ close(fd);
+ return -1;
+ }
+
+ /* we can ignore the possible sizeof(int) here as the resulting
+ number of interface structures won't change */
+ n = strioctl.ic_len / sizeof(struct ifreq);
+
+ /* we will assume that the kernel returns the length as an int
+ at the start of the buffer if the offered size is a
+ multiple of the structure size plus an int */
+ if (n*sizeof(struct ifreq) + sizeof(int) == strioctl.ic_len) {
+ ifr = (struct ifreq *)(buff + sizeof(int));
+ } else {
+ ifr = (struct ifreq *)buff;
+ }
+
+ /* Loop through interfaces */
+
+ for (i = 0; i<n; i++) {
+ ifreq = ifr[i];
+
+		curif = calloc(1, sizeof(struct ifaddrs));
+		if (curif == NULL) {
+			freeifaddrs(*ifap);
+			close(fd);
+			return -1;
+		}
+ if (lastif == NULL) {
+ *ifap = curif;
+ } else {
+ lastif->ifa_next = curif;
+ }
+
+ strioctl.ic_cmd = SIOCGIFFLAGS;
+ strioctl.ic_dp = (char *)&ifreq;
+ strioctl.ic_len = sizeof(struct ifreq);
+ if (ioctl(fd, I_STR, &strioctl) != 0) {
+			freeifaddrs(*ifap);
+			close(fd);
+ return -1;
+ }
+
+ curif->ifa_flags = ifreq.ifr_flags;
+
+ strioctl.ic_cmd = SIOCGIFADDR;
+ strioctl.ic_dp = (char *)&ifreq;
+ strioctl.ic_len = sizeof(struct ifreq);
+ if (ioctl(fd, I_STR, &strioctl) != 0) {
+			freeifaddrs(*ifap);
+			close(fd);
+ return -1;
+ }
+
+ curif->ifa_name = strdup(ifreq.ifr_name);
+ curif->ifa_addr = sockaddr_dup(&ifreq.ifr_addr);
+ curif->ifa_dstaddr = NULL;
+ curif->ifa_data = NULL;
+ curif->ifa_next = NULL;
+ curif->ifa_netmask = NULL;
+
+ strioctl.ic_cmd = SIOCGIFNETMASK;
+ strioctl.ic_dp = (char *)&ifreq;
+ strioctl.ic_len = sizeof(struct ifreq);
+ if (ioctl(fd, I_STR, &strioctl) != 0) {
+			freeifaddrs(*ifap);
+			close(fd);
+ return -1;
+ }
+
+ curif->ifa_netmask = sockaddr_dup(&ifreq.ifr_addr);
+
+ lastif = curif;
+ }
+
+ close(fd);
+
+ return 0;
+}
+
+#define _FOUND_IFACE_ANY
+#endif /* HAVE_IFACE_IFREQ */
+#ifdef HAVE_IFACE_AIX
+
+/****************************************************************************
+this one is for AIX (tested on 4.2)
+****************************************************************************/
+int rep_getifaddrs(struct ifaddrs **ifap)
+{
+ char buff[8192];
+ int fd, i;
+ struct ifconf ifc;
+ struct ifreq *ifr=NULL;
+ struct ifaddrs *curif;
+ struct ifaddrs *lastif = NULL;
+
+ *ifap = NULL;
+
+ if ((fd = socket(AF_INET, SOCK_DGRAM, 0)) == -1) {
+ return -1;
+ }
+
+ ifc.ifc_len = sizeof(buff);
+ ifc.ifc_buf = buff;
+
+ if (ioctl(fd, SIOCGIFCONF, &ifc) != 0) {
+ close(fd);
+ return -1;
+ }
+
+ ifr = ifc.ifc_req;
+
+ /* Loop through interfaces */
+ i = ifc.ifc_len;
+
+ while (i > 0) {
+ unsigned int inc;
+
+ inc = ifr->ifr_addr.sa_len;
+
+ if (ioctl(fd, SIOCGIFADDR, ifr) != 0) {
+			freeifaddrs(*ifap);
+			close(fd);
+ return -1;
+ }
+
+		curif = calloc(1, sizeof(struct ifaddrs));
+		if (curif == NULL) {
+			freeifaddrs(*ifap);
+			close(fd);
+			return -1;
+		}
+ if (lastif == NULL) {
+ *ifap = curif;
+ } else {
+ lastif->ifa_next = curif;
+ }
+
+ curif->ifa_name = strdup(ifr->ifr_name);
+ curif->ifa_addr = sockaddr_dup(&ifr->ifr_addr);
+ curif->ifa_dstaddr = NULL;
+ curif->ifa_data = NULL;
+ curif->ifa_netmask = NULL;
+ curif->ifa_next = NULL;
+
+ if (ioctl(fd, SIOCGIFFLAGS, ifr) != 0) {
+			freeifaddrs(*ifap);
+			close(fd);
+ return -1;
+ }
+
+ curif->ifa_flags = ifr->ifr_flags;
+
+ if (ioctl(fd, SIOCGIFNETMASK, ifr) != 0) {
+			freeifaddrs(*ifap);
+			close(fd);
+ return -1;
+ }
+
+ curif->ifa_netmask = sockaddr_dup(&ifr->ifr_addr);
+
+ lastif = curif;
+
+ /*
+ * Patch from Archie Cobbs (archie@whistle.com). The
+ * addresses in the SIOCGIFCONF interface list have a
+ * minimum size. Usually this doesn't matter, but if
+ * your machine has tunnel interfaces, etc. that have
+ * a zero length "link address", this does matter. */
+
+ if (inc < sizeof(ifr->ifr_addr))
+ inc = sizeof(ifr->ifr_addr);
+ inc += IFNAMSIZ;
+
+ ifr = (struct ifreq*) (((char*) ifr) + inc);
+ i -= inc;
+ }
+
+ close(fd);
+ return 0;
+}
+
+#define _FOUND_IFACE_ANY
+#endif /* HAVE_IFACE_AIX */
+#ifndef _FOUND_IFACE_ANY
+int rep_getifaddrs(struct ifaddrs **ifap)
+{
+ errno = ENOSYS;
+ return -1;
+}
+#endif
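
Whichever of the ioctl-based implementations above gets compiled in, consumers see the standard getifaddrs(3) interface. A minimal sketch of walking the resulting list follows (it assumes a system <ifaddrs.h>; in a libreplace build the same names come from "system/network.h"):

#include <stdio.h>
#include <sys/socket.h>
#include <netinet/in.h>
#include <arpa/inet.h>
#include <ifaddrs.h>

int main(void)
{
	struct ifaddrs *ifp, *cur;
	char buf[INET_ADDRSTRLEN];

	if (getifaddrs(&ifp) == -1) {
		perror("getifaddrs");
		return 1;
	}
	for (cur = ifp; cur != NULL; cur = cur->ifa_next) {
		/* entries without an address, or non-IPv4 ones, are skipped */
		if (cur->ifa_addr == NULL ||
		    cur->ifa_addr->sa_family != AF_INET) {
			continue;
		}
		inet_ntop(AF_INET,
			  &((struct sockaddr_in *)cur->ifa_addr)->sin_addr,
			  buf, sizeof(buf));
		printf("%s: %s\n", cur->ifa_name, buf);
	}
	freeifaddrs(ifp);
	return 0;
}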
diff --git a/lib/replace/hdr_replace.h b/lib/replace/hdr_replace.h
new file mode 100644
index 0000000..6cfa50f
--- /dev/null
+++ b/lib/replace/hdr_replace.h
@@ -0,0 +1,2 @@
+/* this is a replacement header for a missing system header */
+#include "replace.h"
diff --git a/lib/replace/inet_aton.c b/lib/replace/inet_aton.c
new file mode 100644
index 0000000..c6b3bb1
--- /dev/null
+++ b/lib/replace/inet_aton.c
@@ -0,0 +1,33 @@
+/*
+ * Unix SMB/CIFS implementation.
+ * replacement functions
+ * Copyright (C) Michael Adam <obnox@samba.org> 2008
+ *
+ * ** NOTE! The following LGPL license applies to the replace
+ * ** library. This does NOT imply that all of Samba is released
+ * ** under the LGPL
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 3 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include "replace.h"
+#include "system/network.h"
+
+/**
+ * We know that we have inet_pton from earlier libreplace checks.
+ */
+int rep_inet_aton(const char *src, struct in_addr *dst)
+{
+ return (inet_pton(AF_INET, src, dst) > 0) ? 1 : 0;
+}
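
Since the replacement is a thin wrapper over inet_pton(), it keeps inet_aton()'s 1-on-success/0-on-failure convention but inherits inet_pton()'s strict dotted-quad parsing (the hex and shorthand forms classic inet_aton() accepts, such as "0x7f.1", are rejected). A small sketch of the return convention:

#include <stdio.h>
#include <arpa/inet.h>

int main(void)
{
	struct in_addr a;

	/* 1: valid, and unambiguous even for 255.255.255.255,
	   unlike inet_addr()'s INADDR_NONE return */
	printf("%d\n", inet_aton("255.255.255.255", &a));

	/* 0: not a valid dotted quad */
	printf("%d\n", inet_aton("not.an.address", &a));
	return 0;
}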
diff --git a/lib/replace/inet_ntoa.c b/lib/replace/inet_ntoa.c
new file mode 100644
index 0000000..e3b80eb
--- /dev/null
+++ b/lib/replace/inet_ntoa.c
@@ -0,0 +1,39 @@
+/*
+ * Unix SMB/CIFS implementation.
+ * replacement routines for broken systems
+ * Copyright (C) Andrew Tridgell 2003
+ * Copyright (C) Michael Adam 2008
+ *
+ * ** NOTE! The following LGPL license applies to the replace
+ * ** library. This does NOT imply that all of Samba is released
+ * ** under the LGPL
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 3 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include "replace.h"
+#include "system/network.h"
+
+/**
+ * NOTE: this is not thread safe, but it can't be, either
+ * since it returns a pointer to static memory.
+ */
+char *rep_inet_ntoa(struct in_addr ip)
+{
+ uint8_t *p = (uint8_t *)&ip.s_addr;
+ static char buf[18];
+ slprintf(buf, 17, "%d.%d.%d.%d",
+ (int)p[0], (int)p[1], (int)p[2], (int)p[3]);
+ return buf;
+}
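
The thread-safety caveat above bites in an easy-to-miss way: every call returns the same static buffer, so two results cannot be held at once without copying. A short sketch of the pitfall and the usual fix:

#include <stdio.h>
#include <string.h>
#include <arpa/inet.h>

int main(void)
{
	struct in_addr ip1, ip2;
	char first[INET_ADDRSTRLEN];

	inet_aton("10.0.0.1", &ip1);
	inet_aton("192.168.0.1", &ip2);

	/* copy before the next call overwrites the static buffer */
	strncpy(first, inet_ntoa(ip1), sizeof(first) - 1);
	first[sizeof(first) - 1] = '\0';

	printf("%s then %s\n", first, inet_ntoa(ip2));
	return 0;
}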
diff --git a/lib/replace/inet_ntop.c b/lib/replace/inet_ntop.c
new file mode 100644
index 0000000..fb3d8e9
--- /dev/null
+++ b/lib/replace/inet_ntop.c
@@ -0,0 +1,191 @@
+/*
+ * Copyright (C) 1996-2001 Internet Software Consortium.
+ *
+ * Permission to use, copy, modify, and distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND INTERNET SOFTWARE CONSORTIUM
+ * DISCLAIMS ALL WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL
+ * INTERNET SOFTWARE CONSORTIUM BE LIABLE FOR ANY SPECIAL, DIRECT,
+ * INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING
+ * FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT,
+ * NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION
+ * WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+
+#include "replace.h"
+#include "system/network.h"
+
+#define NS_INT16SZ 2
+#define NS_IN6ADDRSZ 16
+
+/*
+ * WARNING: Don't even consider trying to compile this on a system where
+ * sizeof(int) < 4. sizeof(int) > 4 is fine; all the world's not a VAX.
+ */
+
+static const char *inet_ntop4(const unsigned char *src, char *dst,
+ socklen_t size);
+
+#ifdef AF_INET6
+static const char *inet_ntop6(const unsigned char *src, char *dst,
+ socklen_t size);
+#endif
+
+/* char *
+ * isc_net_ntop(af, src, dst, size)
+ * convert a network format address to presentation format.
+ * return:
+ * pointer to presentation format address (`dst'), or NULL (see errno).
+ * author:
+ * Paul Vixie, 1996.
+ */
+const char *
+rep_inet_ntop(int af, const void *src, char *dst, socklen_t size)
+{
+ switch (af) {
+ case AF_INET:
+ return (inet_ntop4(src, dst, size));
+#ifdef AF_INET6
+ case AF_INET6:
+ return (inet_ntop6(src, dst, size));
+#endif
+ default:
+ errno = EAFNOSUPPORT;
+ return (NULL);
+ }
+ /* NOTREACHED */
+}
+
+/* const char *
+ * inet_ntop4(src, dst, size)
+ * format an IPv4 address
+ * return:
+ * `dst' (as a const)
+ * notes:
+ * (1) uses no statics
+ * (2) takes a unsigned char* not an in_addr as input
+ * author:
+ * Paul Vixie, 1996.
+ */
+static const char *
+inet_ntop4(const unsigned char *src, char *dst, socklen_t size)
+{
+ static const char *fmt = "%u.%u.%u.%u";
+ char tmp[sizeof "255.255.255.255"];
+ size_t len;
+
+ len = snprintf(tmp, sizeof tmp, fmt, src[0], src[1], src[2], src[3]);
+ if (len >= size) {
+ errno = ENOSPC;
+ return (NULL);
+ }
+ memcpy(dst, tmp, len + 1);
+
+ return (dst);
+}
+
+/* const char *
+ * isc_inet_ntop6(src, dst, size)
+ * convert IPv6 binary address into presentation (printable) format
+ * author:
+ * Paul Vixie, 1996.
+ */
+#ifdef AF_INET6
+static const char *
+inet_ntop6(const unsigned char *src, char *dst, socklen_t size)
+{
+ /*
+ * Note that int32_t and int16_t need only be "at least" large enough
+ * to contain a value of the specified size. On some systems, like
+ * Crays, there is no such thing as an integer variable with 16 bits.
+ * Keep this in mind if you think this function should have been coded
+ * to use pointer overlays. All the world's not a VAX.
+ */
+ char tmp[sizeof "ffff:ffff:ffff:ffff:ffff:ffff:255.255.255.255"], *tp;
+ struct { int base, len; } best, cur;
+ unsigned int words[NS_IN6ADDRSZ / NS_INT16SZ];
+ int i, inc;
+
+ /*
+ * Preprocess:
+ * Copy the input (bytewise) array into a wordwise array.
+ * Find the longest run of 0x00's in src[] for :: shorthanding.
+ */
+ memset(words, '\0', sizeof words);
+ for (i = 0; i < NS_IN6ADDRSZ; i++)
+ words[i / 2] |= (src[i] << ((1 - (i % 2)) << 3));
+ best.base = -1;
+ best.len = 0;
+ cur.base = -1;
+ cur.len = 0;
+ for (i = 0; i < (NS_IN6ADDRSZ / NS_INT16SZ); i++) {
+ if (words[i] == 0) {
+ if (cur.base == -1)
+ cur.base = i, cur.len = 1;
+ else
+ cur.len++;
+ } else {
+ if (cur.base != -1) {
+ if (best.base == -1 || cur.len > best.len)
+ best = cur;
+ cur.base = -1;
+ }
+ }
+ }
+ if (cur.base != -1) {
+ if (best.base == -1 || cur.len > best.len)
+ best = cur;
+ }
+ if (best.base != -1 && best.len < 2)
+ best.base = -1;
+
+ /*
+ * Format the result.
+ */
+ tp = tmp;
+ for (i = 0; i < (NS_IN6ADDRSZ / NS_INT16SZ); i++) {
+ /* Are we inside the best run of 0x00's? */
+ if (best.base != -1 && i >= best.base &&
+ i < (best.base + best.len)) {
+ if (i == best.base)
+ *tp++ = ':';
+ continue;
+ }
+ /* Are we following an initial run of 0x00s or any real hex? */
+ if (i != 0)
+ *tp++ = ':';
+ /* Is this address an encapsulated IPv4? */
+ if (i == 6 && best.base == 0 &&
+ (best.len == 6 || (best.len == 5 && words[5] == 0xffff))) {
+ if (!inet_ntop4(src+12, tp, sizeof tmp - (tp - tmp)))
+ return (NULL);
+ tp += strlen(tp);
+ break;
+ }
+ inc = snprintf(tp, 5, "%x", words[i]);
+ if (inc >= 5) {
+ abort();
+ }
+ tp += inc;
+ }
+ /* Was it a trailing run of 0x00's? */
+ if (best.base != -1 && (best.base + best.len) ==
+ (NS_IN6ADDRSZ / NS_INT16SZ))
+ *tp++ = ':';
+ *tp++ = '\0';
+
+ /*
+ * Check for overflow, copy, and we're done.
+ */
+ if ((size_t)(tp - tmp) > size) {
+ errno = ENOSPC;
+ return (NULL);
+ }
+ memcpy(dst, tmp, tp - tmp);
+ return (dst);
+}
+#endif /* AF_INET6 */
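
The best.base/best.len bookkeeping above is what produces the familiar "::" shorthand: the longest run of zero words (of length at least two) is elided, and a v4-mapped tail is printed in dotted-quad form. A quick sketch of the observable behaviour:

#include <stdio.h>
#include <arpa/inet.h>

int main(void)
{
	struct in6_addr a6;
	char buf[INET6_ADDRSTRLEN];

	/* the longest run of zero words collapses to "::" */
	inet_pton(AF_INET6, "2001:0db8:0000:0000:0000:0000:0000:0001", &a6);
	if (inet_ntop(AF_INET6, &a6, buf, sizeof(buf)) != NULL) {
		printf("%s\n", buf);		/* 2001:db8::1 */
	}

	/* v4-mapped addresses keep the dotted-quad tail */
	inet_pton(AF_INET6, "::ffff:192.0.2.1", &a6);
	if (inet_ntop(AF_INET6, &a6, buf, sizeof(buf)) != NULL) {
		printf("%s\n", buf);		/* ::ffff:192.0.2.1 */
	}
	return 0;
}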
diff --git a/lib/replace/inet_pton.c b/lib/replace/inet_pton.c
new file mode 100644
index 0000000..80e4865
--- /dev/null
+++ b/lib/replace/inet_pton.c
@@ -0,0 +1,213 @@
+/*
+ * Copyright (C) 1996-2001 Internet Software Consortium.
+ *
+ * Permission to use, copy, modify, and distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND INTERNET SOFTWARE CONSORTIUM
+ * DISCLAIMS ALL WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL
+ * INTERNET SOFTWARE CONSORTIUM BE LIABLE FOR ANY SPECIAL, DIRECT,
+ * INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING
+ * FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT,
+ * NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION
+ * WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#include "replace.h"
+#include "system/network.h"
+
+#define NS_INT16SZ 2
+#define NS_INADDRSZ 4
+#define NS_IN6ADDRSZ 16
+
+/*
+ * WARNING: Don't even consider trying to compile this on a system where
+ * sizeof(int) < 4. sizeof(int) > 4 is fine; all the world's not a VAX.
+ */
+
+static int inet_pton4(const char *src, unsigned char *dst);
+#ifdef AF_INET6
+static int inet_pton6(const char *src, unsigned char *dst);
+#endif
+
+/* int
+ * inet_pton(af, src, dst)
+ * convert from presentation format (which usually means ASCII printable)
+ * to network format (which is usually some kind of binary format).
+ * return:
+ * 1 if the address was valid for the specified address family
+ * 0 if the address wasn't valid (`dst' is untouched in this case)
+ * -1 if some other error occurred (`dst' is untouched in this case, too)
+ * author:
+ * Paul Vixie, 1996.
+ */
+int
+rep_inet_pton(int af,
+ const char *src,
+ void *dst)
+{
+ switch (af) {
+ case AF_INET:
+ return (inet_pton4(src, dst));
+#ifdef AF_INET6
+ case AF_INET6:
+ return (inet_pton6(src, dst));
+#endif
+ default:
+ errno = EAFNOSUPPORT;
+ return (-1);
+ }
+ /* NOTREACHED */
+}
+
+/* int
+ * inet_pton4(src, dst)
+ * like inet_aton() but without all the hexadecimal and shorthand.
+ * return:
+ * 1 if `src' is a valid dotted quad, else 0.
+ * notice:
+ * does not touch `dst' unless it's returning 1.
+ * author:
+ * Paul Vixie, 1996.
+ */
+static int
+inet_pton4(const char *src, unsigned char *dst)
+{
+ static const char digits[] = "0123456789";
+ int saw_digit, octets, ch;
+ unsigned char tmp[NS_INADDRSZ], *tp;
+
+ saw_digit = 0;
+ octets = 0;
+ *(tp = tmp) = 0;
+ while ((ch = *src++) != '\0') {
+ const char *pch;
+
+ if ((pch = strchr(digits, ch)) != NULL) {
+ unsigned int new = *tp * 10 + (pch - digits);
+
+ if (new > 255)
+ return (0);
+ *tp = new;
+ if (! saw_digit) {
+ if (++octets > 4)
+ return (0);
+ saw_digit = 1;
+ }
+ } else if (ch == '.' && saw_digit) {
+ if (octets == 4)
+ return (0);
+ *++tp = 0;
+ saw_digit = 0;
+ } else
+ return (0);
+ }
+ if (octets < 4)
+ return (0);
+ memcpy(dst, tmp, NS_INADDRSZ);
+ return (1);
+}
+
+/* int
+ * inet_pton6(src, dst)
+ * convert presentation level address to network order binary form.
+ * return:
+ * 1 if `src' is a valid [RFC1884 2.2] address, else 0.
+ * notice:
+ * (1) does not touch `dst' unless it's returning 1.
+ * (2) :: in a full address is silently ignored.
+ * credit:
+ * inspired by Mark Andrews.
+ * author:
+ * Paul Vixie, 1996.
+ */
+#ifdef AF_INET6
+static int
+inet_pton6(const char *src, unsigned char *dst)
+{
+ static const char xdigits_l[] = "0123456789abcdef",
+ xdigits_u[] = "0123456789ABCDEF";
+ unsigned char tmp[NS_IN6ADDRSZ], *tp, *endp, *colonp;
+ const char *xdigits, *curtok;
+ int ch, saw_xdigit;
+ unsigned int val;
+
+ memset((tp = tmp), '\0', NS_IN6ADDRSZ);
+ endp = tp + NS_IN6ADDRSZ;
+ colonp = NULL;
+ /* Leading :: requires some special handling. */
+ if (*src == ':')
+ if (*++src != ':')
+ return (0);
+ curtok = src;
+ saw_xdigit = 0;
+ val = 0;
+ while ((ch = *src++) != '\0') {
+ const char *pch;
+
+ if ((pch = strchr((xdigits = xdigits_l), ch)) == NULL)
+ pch = strchr((xdigits = xdigits_u), ch);
+ if (pch != NULL) {
+ val <<= 4;
+ val |= (pch - xdigits);
+ if (val > 0xffff)
+ return (0);
+ saw_xdigit = 1;
+ continue;
+ }
+ if (ch == ':') {
+ curtok = src;
+ if (!saw_xdigit) {
+ if (colonp)
+ return (0);
+ colonp = tp;
+ continue;
+ }
+ if (tp + NS_INT16SZ > endp)
+ return (0);
+ *tp++ = (unsigned char) (val >> 8) & 0xff;
+ *tp++ = (unsigned char) val & 0xff;
+ saw_xdigit = 0;
+ val = 0;
+ continue;
+ }
+ if (ch == '.' && ((tp + NS_INADDRSZ) <= endp) &&
+ inet_pton4(curtok, tp) > 0) {
+ tp += NS_INADDRSZ;
+ saw_xdigit = 0;
+ break; /* '\0' was seen by inet_pton4(). */
+ }
+ return (0);
+ }
+ if (saw_xdigit) {
+ if (tp + NS_INT16SZ > endp)
+ return (0);
+ *tp++ = (unsigned char) (val >> 8) & 0xff;
+ *tp++ = (unsigned char) val & 0xff;
+ }
+ if (colonp != NULL) {
+ /*
+ * Since some memmove()'s erroneously fail to handle
+ * overlapping regions, we'll do the shift by hand.
+ */
+ const int n = tp - colonp;
+ int i;
+
+ for (i = 1; i <= n; i++) {
+ endp[- i] = colonp[n - i];
+ colonp[n - i] = 0;
+ }
+ tp = endp;
+ }
+ if (tp != endp)
+ return (0);
+ memcpy(dst, tmp, NS_IN6ADDRSZ);
+ return (1);
+}
+#endif
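
The tri-state return convention documented above differs from inet_aton(): 0 means a malformed address, while -1 is reserved for an unsupported address family (with errno set to EAFNOSUPPORT). A minimal sketch:

#include <stdio.h>
#include <arpa/inet.h>

int main(void)
{
	unsigned char v4[4], v6[16];

	printf("%d\n", inet_pton(AF_INET, "192.0.2.1", v4));	/* 1 */
	printf("%d\n", inet_pton(AF_INET, "192.0.2", v4));	/* 0: short */
	printf("%d\n", inet_pton(AF_INET6, "2001:db8::1", v6));	/* 1 */
	printf("%d\n", inet_pton(-1, "192.0.2.1", v4));		/* -1 */
	return 0;
}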
diff --git a/lib/replace/poll.c b/lib/replace/poll.c
new file mode 100644
index 0000000..1105617
--- /dev/null
+++ b/lib/replace/poll.c
@@ -0,0 +1,139 @@
+/*
+ Unix SMB/CIFS implementation.
+ poll.c - poll wrapper
+
+ This file is based on code from libssh (LGPLv2.1+ at the time it
+ was downloaded), thus the following copyrights:
+
+ Copyright (c) 2009-2010 by Andreas Schneider <mail@cynapses.org>
+ Copyright (c) 2003-2009 by Aris Adamantiadis
+ Copyright (c) 2009 Aleksandar Kanchev
+ Copyright (C) Volker Lendecke 2011
+
+ ** NOTE! The following LGPL license applies to the replace
+ ** library. This does NOT imply that all of Samba is released
+ ** under the LGPL
+
+ This library is free software; you can redistribute it and/or
+ modify it under the terms of the GNU Lesser General Public
+ License as published by the Free Software Foundation; either
+ version 3 of the License, or (at your option) any later version.
+
+ This library is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ Lesser General Public License for more details.
+
+ You should have received a copy of the GNU Lesser General Public
+ License along with this library; if not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include "replace.h"
+#include "system/select.h"
+#ifdef HAVE_SYS_TIME_H
+#include <sys/time.h>
+#endif
+#ifdef HAVE_SYS_IOCTL_H
+#include <sys/ioctl.h>
+#endif
+
+
+int rep_poll(struct pollfd *fds, nfds_t nfds, int timeout)
+{
+ fd_set rfds, wfds, efds;
+ struct timeval tv, *ptv;
+ int max_fd;
+ int rc;
+ nfds_t i;
+
+ if ((fds == NULL) && (nfds != 0)) {
+ errno = EFAULT;
+ return -1;
+ }
+
+ FD_ZERO(&rfds);
+ FD_ZERO(&wfds);
+ FD_ZERO(&efds);
+
+ rc = 0;
+ max_fd = 0;
+
+ /* compute fd_sets and find largest descriptor */
+ for (i = 0; i < nfds; i++) {
+ if ((fds[i].fd < 0) || (fds[i].fd >= FD_SETSIZE)) {
+ fds[i].revents = POLLNVAL;
+ continue;
+ }
+
+ if (fds[i].events & (POLLIN | POLLRDNORM)) {
+ FD_SET(fds[i].fd, &rfds);
+ }
+ if (fds[i].events & (POLLOUT | POLLWRNORM | POLLWRBAND)) {
+ FD_SET(fds[i].fd, &wfds);
+ }
+ if (fds[i].events & (POLLPRI | POLLRDBAND)) {
+ FD_SET(fds[i].fd, &efds);
+ }
+ if (fds[i].fd > max_fd &&
+ (fds[i].events & (POLLIN | POLLOUT | POLLPRI |
+ POLLRDNORM | POLLRDBAND |
+ POLLWRNORM | POLLWRBAND))) {
+ max_fd = fds[i].fd;
+ }
+ }
+
+ if (timeout < 0) {
+ ptv = NULL;
+ } else {
+ ptv = &tv;
+ if (timeout == 0) {
+ tv.tv_sec = 0;
+ tv.tv_usec = 0;
+ } else {
+ tv.tv_sec = timeout / 1000;
+ tv.tv_usec = (timeout % 1000) * 1000;
+ }
+ }
+
+ rc = select(max_fd + 1, &rfds, &wfds, &efds, ptv);
+ if (rc < 0) {
+ return -1;
+ }
+
+ for (rc = 0, i = 0; i < nfds; i++) {
+ if ((fds[i].fd < 0) || (fds[i].fd >= FD_SETSIZE)) {
+ continue;
+ }
+
+ fds[i].revents = 0;
+
+ if (FD_ISSET(fds[i].fd, &rfds)) {
+ int err = errno;
+ int available = 0;
+ int ret;
+
+ /* support for POLLHUP */
+ ret = ioctl(fds[i].fd, FIONREAD, &available);
+ if ((ret == -1) || (available == 0)) {
+ fds[i].revents |= POLLHUP;
+ } else {
+ fds[i].revents |= fds[i].events
+ & (POLLIN | POLLRDNORM);
+ }
+
+ errno = err;
+ }
+ if (FD_ISSET(fds[i].fd, &wfds)) {
+ fds[i].revents |= fds[i].events
+ & (POLLOUT | POLLWRNORM | POLLWRBAND);
+ }
+ if (FD_ISSET(fds[i].fd, &efds)) {
+ fds[i].revents |= fds[i].events
+ & (POLLPRI | POLLRDBAND);
+ }
+ if (fds[i].revents & ~POLLHUP) {
+ rc++;
+ }
+ }
+ return rc;
+}
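
Because select() alone cannot distinguish "readable" from "hung up", the FIONREAD probe above maps a readable-but-empty descriptor to POLLHUP. A minimal sketch of the semantics this preserves, using a pipe (the behaviour matches a native poll() on common systems):

#include <stdio.h>
#include <unistd.h>
#include <poll.h>

int main(void)
{
	int p[2];
	struct pollfd pfd;
	char c;

	if (pipe(p) == -1) {
		perror("pipe");
		return 1;
	}
	write(p[1], "x", 1);

	pfd.fd = p[0];
	pfd.events = POLLIN;

	/* data pending: POLLIN is reported */
	if (poll(&pfd, 1, 1000) == 1 && (pfd.revents & POLLIN)) {
		read(p[0], &c, 1);
	}

	/* writer closed, buffer drained: FIONREAD reports 0 bytes,
	   so the emulation flags POLLHUP instead of POLLIN */
	close(p[1]);
	poll(&pfd, 1, 0);
	printf("POLLHUP: %d\n", (pfd.revents & POLLHUP) != 0);

	close(p[0]);
	return 0;
}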
diff --git a/lib/replace/replace-test.h b/lib/replace/replace-test.h
new file mode 100644
index 0000000..ed8e75e
--- /dev/null
+++ b/lib/replace/replace-test.h
@@ -0,0 +1,9 @@
+#ifndef __LIB_REPLACE_REPLACE_TEST_H__
+#define __LIB_REPLACE_REPLACE_TEST_H__
+
+int libreplace_test_strptime(void);
+int test_readdir_os2_delete(void);
+int getifaddrs_test(void);
+
+#endif /* __LIB_REPLACE_REPLACE_TEST_H__ */
+
diff --git a/lib/replace/replace-testsuite.h b/lib/replace/replace-testsuite.h
new file mode 100644
index 0000000..b28dbec
--- /dev/null
+++ b/lib/replace/replace-testsuite.h
@@ -0,0 +1,10 @@
+#ifndef __LIB_REPLACE_REPLACE_TESTSUITE_H__
+#define __LIB_REPLACE_REPLACE_TESTSUITE_H__
+
+#include <stdbool.h>
+struct torture_context;
+
+bool torture_local_replace(struct torture_context *ctx);
+
+#endif /* __LIB_REPLACE_REPLACE_TESTSUITE_H__ */
+
diff --git a/lib/replace/replace.c b/lib/replace/replace.c
new file mode 100644
index 0000000..9fae44a
--- /dev/null
+++ b/lib/replace/replace.c
@@ -0,0 +1,910 @@
+/*
+ Unix SMB/CIFS implementation.
+ replacement routines for broken systems
+ Copyright (C) Andrew Tridgell 1992-1998
+ Copyright (C) Jelmer Vernooij 2005-2008
+ Copyright (C) Matthieu Patou 2010
+
+ ** NOTE! The following LGPL license applies to the replace
+ ** library. This does NOT imply that all of Samba is released
+ ** under the LGPL
+
+ This library is free software; you can redistribute it and/or
+ modify it under the terms of the GNU Lesser General Public
+ License as published by the Free Software Foundation; either
+ version 3 of the License, or (at your option) any later version.
+
+ This library is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ Lesser General Public License for more details.
+
+ You should have received a copy of the GNU Lesser General Public
+ License along with this library; if not, see <http://www.gnu.org/licenses/>.
+*/
+
+#include "replace.h"
+
+#include "system/filesys.h"
+#include "system/time.h"
+#include "system/network.h"
+#include "system/passwd.h"
+#include "system/syslog.h"
+#include "system/locale.h"
+#include "system/wait.h"
+
+#ifdef _WIN32
+#define mkdir(d,m) _mkdir(d)
+#endif
+
+void replace_dummy(void);
+void replace_dummy(void) {}
+
+#ifndef HAVE_FTRUNCATE
+ /*******************************************************************
+ftruncate for operating systems that don't have it
+********************************************************************/
+int rep_ftruncate(int f, off_t l)
+{
+#ifdef HAVE_CHSIZE
+ return chsize(f,l);
+#elif defined(F_FREESP)
+ struct flock fl;
+
+ fl.l_whence = 0;
+ fl.l_len = 0;
+ fl.l_start = l;
+ fl.l_type = F_WRLCK;
+ return fcntl(f, F_FREESP, &fl);
+#else
+#error "you must have a ftruncate function"
+#endif
+}
+#endif /* HAVE_FTRUNCATE */
+
+
+#ifndef HAVE_STRLCPY
+/*
+ * Like strncpy but does not 0 fill the buffer and always null
+ * terminates. bufsize is the size of the destination buffer.
+ * Returns the length of s.
+ */
+size_t rep_strlcpy(char *d, const char *s, size_t bufsize)
+{
+ size_t len = strlen(s);
+ size_t ret = len;
+
+	if (bufsize == 0) {
+ return 0;
+ }
+ if (len >= bufsize) {
+ len = bufsize - 1;
+ }
+ memcpy(d, s, len);
+ d[len] = 0;
+ return ret;
+}
+#endif
+
+#ifndef HAVE_STRLCAT
+/* like strncat but does not 0 fill the buffer and always null
+ terminates. bufsize is the length of the buffer, which should
+ be one more than the maximum resulting string length */
+size_t rep_strlcat(char *d, const char *s, size_t bufsize)
+{
+ size_t len1 = strnlen(d, bufsize);
+ size_t len2 = strlen(s);
+ size_t ret = len1 + len2;
+
+ if (len1+len2 >= bufsize) {
+ if (bufsize < (len1+1)) {
+ return ret;
+ }
+ len2 = bufsize - (len1+1);
+ }
+ if (len2 > 0) {
+ memcpy(d+len1, s, len2);
+ d[len1+len2] = 0;
+ }
+ return ret;
+}
+#endif
+
+#ifndef HAVE_MKTIME
+/*******************************************************************
+a mktime() replacement for those who don't have it - contributed by
+C.A. Lademann <cal@zls.com>
+Corrections by richard.kettlewell@kewill.com
+********************************************************************/
+
+#define MINUTE 60
+#define HOUR 60*MINUTE
+#define DAY 24*HOUR
+#define YEAR 365*DAY
+time_t rep_mktime(struct tm *t)
+{
+ struct tm *u;
+ time_t epoch = 0;
+ int n;
+ int mon [] = { 31, 28, 31, 30, 31, 30, 31, 31, 30, 31, 30, 31 },
+ y, m, i;
+
+ if(t->tm_year < 70)
+ return((time_t)-1);
+
+ n = t->tm_year + 1900 - 1;
+ epoch = (t->tm_year - 70) * YEAR +
+ ((n / 4 - n / 100 + n / 400) - (1969 / 4 - 1969 / 100 + 1969 / 400)) * DAY;
+
+ y = t->tm_year + 1900;
+ m = 0;
+
+ for(i = 0; i < t->tm_mon; i++) {
+ epoch += mon [m] * DAY;
+ if(m == 1 && y % 4 == 0 && (y % 100 != 0 || y % 400 == 0))
+ epoch += DAY;
+
+ if(++m > 11) {
+ m = 0;
+ y++;
+ }
+ }
+
+ epoch += (t->tm_mday - 1) * DAY;
+ epoch += t->tm_hour * HOUR + t->tm_min * MINUTE + t->tm_sec;
+
+ if((u = localtime(&epoch)) != NULL) {
+ t->tm_sec = u->tm_sec;
+ t->tm_min = u->tm_min;
+ t->tm_hour = u->tm_hour;
+ t->tm_mday = u->tm_mday;
+ t->tm_mon = u->tm_mon;
+ t->tm_year = u->tm_year;
+ t->tm_wday = u->tm_wday;
+ t->tm_yday = u->tm_yday;
+ t->tm_isdst = u->tm_isdst;
+ }
+
+ return(epoch);
+}
+#endif /* !HAVE_MKTIME */
+
+
+#ifndef HAVE_INITGROUPS
+/****************************************************************************
+ some systems don't have an initgroups call
+****************************************************************************/
+int rep_initgroups(char *name, gid_t id)
+{
+#ifndef HAVE_SETGROUPS
+ /* yikes! no SETGROUPS or INITGROUPS? how can this work? */
+ errno = ENOSYS;
+ return -1;
+#else /* HAVE_SETGROUPS */
+
+#include <grp.h>
+
+ gid_t *grouplst = NULL;
+ int max_gr = NGROUPS_MAX;
+ int ret;
+ int i,j;
+ struct group *g;
+ char *gr;
+
+ if((grouplst = malloc(sizeof(gid_t) * max_gr)) == NULL) {
+ errno = ENOMEM;
+ return -1;
+ }
+
+ grouplst[0] = id;
+ i = 1;
+ while (i < max_gr && ((g = (struct group *)getgrent()) != (struct group *)NULL)) {
+ if (g->gr_gid == id)
+ continue;
+ j = 0;
+ gr = g->gr_mem[0];
+		while (gr && (*gr != '\0')) {
+ if (strcmp(name,gr) == 0) {
+ grouplst[i] = g->gr_gid;
+ i++;
+ gr = (char *)NULL;
+ break;
+ }
+ gr = g->gr_mem[++j];
+ }
+ }
+ endgrent();
+ ret = setgroups(i, grouplst);
+ free(grouplst);
+ return ret;
+#endif /* HAVE_SETGROUPS */
+}
+#endif /* HAVE_INITGROUPS */
+
+
+#ifndef HAVE_MEMMOVE
+/*******************************************************************
+safely copies memory, ensuring no overlap problems.
+this is only used if the machine does not have its own memmove().
+this is not the fastest algorithm in town, but it will do for our
+needs.
+********************************************************************/
+void *rep_memmove(void *dest,const void *src,int size)
+{
+ unsigned long d,s;
+ int i;
+ if (dest==src || !size) return(dest);
+
+ d = (unsigned long)dest;
+ s = (unsigned long)src;
+
+ if ((d >= (s+size)) || (s >= (d+size))) {
+ /* no overlap */
+ memcpy(dest,src,size);
+ return(dest);
+ }
+
+ if (d < s) {
+ /* we can forward copy */
+ if (s-d >= sizeof(int) &&
+ !(s%sizeof(int)) &&
+ !(d%sizeof(int)) &&
+ !(size%sizeof(int))) {
+ /* do it all as words */
+ int *idest = (int *)dest;
+ int *isrc = (int *)src;
+ size /= sizeof(int);
+ for (i=0;i<size;i++) idest[i] = isrc[i];
+ } else {
+ /* simplest */
+ char *cdest = (char *)dest;
+ char *csrc = (char *)src;
+ for (i=0;i<size;i++) cdest[i] = csrc[i];
+ }
+ } else {
+ /* must backward copy */
+ if (d-s >= sizeof(int) &&
+ !(s%sizeof(int)) &&
+ !(d%sizeof(int)) &&
+ !(size%sizeof(int))) {
+ /* do it all as words */
+ int *idest = (int *)dest;
+ int *isrc = (int *)src;
+ size /= sizeof(int);
+ for (i=size-1;i>=0;i--) idest[i] = isrc[i];
+ } else {
+ /* simplest */
+ char *cdest = (char *)dest;
+ char *csrc = (char *)src;
+ for (i=size-1;i>=0;i--) cdest[i] = csrc[i];
+ }
+ }
+ return(dest);
+}
+#endif /* HAVE_MEMMOVE */
+
+#ifndef HAVE_STRDUP
+/****************************************************************************
+duplicate a string
+****************************************************************************/
+char *rep_strdup(const char *s)
+{
+ size_t len;
+ char *ret;
+
+ if (!s) return(NULL);
+
+ len = strlen(s)+1;
+ ret = (char *)malloc(len);
+ if (!ret) return(NULL);
+ memcpy(ret,s,len);
+ return(ret);
+}
+#endif /* HAVE_STRDUP */
+
+#ifndef HAVE_SETLINEBUF
+void rep_setlinebuf(FILE *stream)
+{
+ setvbuf(stream, (char *)NULL, _IOLBF, 0);
+}
+#endif /* HAVE_SETLINEBUF */
+
+#ifndef HAVE_VSYSLOG
+#ifdef HAVE_SYSLOG
+void rep_vsyslog (int facility_priority, const char *format, va_list arglist)
+{
+ char *msg = NULL;
+ vasprintf(&msg, format, arglist);
+ if (!msg)
+ return;
+ syslog(facility_priority, "%s", msg);
+ free(msg);
+}
+#endif /* HAVE_SYSLOG */
+#endif /* HAVE_VSYSLOG */
+
+#ifndef HAVE_STRNLEN
+/**
+ Some platforms don't have strnlen
+**/
+ size_t rep_strnlen(const char *s, size_t max)
+{
+ size_t len;
+
+ for (len = 0; len < max; len++) {
+ if (s[len] == '\0') {
+ break;
+ }
+ }
+ return len;
+}
+#endif
+
+#ifndef HAVE_STRNDUP
+/**
+ Some platforms don't have strndup.
+**/
+char *rep_strndup(const char *s, size_t n)
+{
+ char *ret;
+
+ n = strnlen(s, n);
+ ret = malloc(n+1);
+ if (!ret)
+ return NULL;
+ memcpy(ret, s, n);
+ ret[n] = 0;
+
+ return ret;
+}
+#endif
+
+#if !defined(HAVE_WAITPID) && defined(HAVE_WAIT4)
+int rep_waitpid(pid_t pid,int *status,int options)
+{
+ return wait4(pid, status, options, NULL);
+}
+#endif
+
+#ifndef HAVE_SETEUID
+int rep_seteuid(uid_t euid)
+{
+#ifdef HAVE_SETRESUID
+ return setresuid(-1, euid, -1);
+#else
+ errno = ENOSYS;
+ return -1;
+#endif
+}
+#endif
+
+#ifndef HAVE_SETEGID
+int rep_setegid(gid_t egid)
+{
+#ifdef HAVE_SETRESGID
+ return setresgid(-1, egid, -1);
+#else
+ errno = ENOSYS;
+ return -1;
+#endif
+}
+#endif
+
+/*******************************************************************
+os/2 also doesn't have chroot
+********************************************************************/
+#ifndef HAVE_CHROOT
+int rep_chroot(const char *dname)
+{
+ errno = ENOSYS;
+ return -1;
+}
+#endif
+
+/*****************************************************************
+ Possibly replace mkstemp if it is broken.
+*****************************************************************/
+
+#ifndef HAVE_SECURE_MKSTEMP
+int rep_mkstemp(char *template)
+{
+ /* have a reasonable go at emulating it. Hope that
+ the system mktemp() isn't completely hopeless */
+ mktemp(template);
+ if (template[0] == 0)
+ return -1;
+ return open(template, O_CREAT|O_EXCL|O_RDWR, 0600);
+}
+#endif
+
+#ifndef HAVE_MKDTEMP
+char *rep_mkdtemp(char *template)
+{
+ char *dname;
+
+ if ((dname = mktemp(template))) {
+ if (mkdir(dname, 0700) >= 0) {
+ return dname;
+ }
+ }
+
+ return NULL;
+}
+#endif
+
+/*****************************************************************
+ Watch out: this is not thread safe.
+*****************************************************************/
+
+#ifndef HAVE_PREAD
+ssize_t rep_pread(int __fd, void *__buf, size_t __nbytes, off_t __offset)
+{
+ if (lseek(__fd, __offset, SEEK_SET) != __offset) {
+ return -1;
+ }
+ return read(__fd, __buf, __nbytes);
+}
+#endif
+
+/*****************************************************************
+ Watch out: this is not thread safe.
+*****************************************************************/
+
+#ifndef HAVE_PWRITE
+ssize_t rep_pwrite(int __fd, const void *__buf, size_t __nbytes, off_t __offset)
+{
+ if (lseek(__fd, __offset, SEEK_SET) != __offset) {
+ return -1;
+ }
+ return write(__fd, __buf, __nbytes);
+}
+#endif
+
+#ifndef HAVE_STRCASESTR
+char *rep_strcasestr(const char *haystack, const char *needle)
+{
+ const char *s;
+ size_t nlen = strlen(needle);
+ for (s=haystack;*s;s++) {
+ if (toupper(*needle) == toupper(*s) &&
+ strncasecmp(s, needle, nlen) == 0) {
+ return (char *)((uintptr_t)s);
+ }
+ }
+ return NULL;
+}
+#endif
+
+#ifndef HAVE_STRTOK_R
+/* based on GLIBC version, copyright Free Software Foundation */
+char *rep_strtok_r(char *s, const char *delim, char **save_ptr)
+{
+ char *token;
+
+ if (s == NULL) s = *save_ptr;
+
+ s += strspn(s, delim);
+ if (*s == '\0') {
+ *save_ptr = s;
+ return NULL;
+ }
+
+ token = s;
+ s = strpbrk(token, delim);
+ if (s == NULL) {
+ *save_ptr = token + strlen(token);
+ } else {
+ *s = '\0';
+ *save_ptr = s + 1;
+ }
+
+ return token;
+}
+#endif
+
+
+#ifndef HAVE_STRTOLL
+long long int rep_strtoll(const char *str, char **endptr, int base)
+{
+#ifdef HAVE_STRTOQ
+ return strtoq(str, endptr, base);
+#elif defined(HAVE___STRTOLL)
+ return __strtoll(str, endptr, base);
+#elif SIZEOF_LONG == SIZEOF_LONG_LONG
+ return (long long int) strtol(str, endptr, base);
+#else
+# error "You need a strtoll function"
+#endif
+}
+#else
+#ifdef HAVE_BSD_STRTOLL
+#ifdef HAVE_STRTOQ
+long long int rep_strtoll(const char *str, char **endptr, int base)
+{
+ long long int nb = strtoq(str, endptr, base);
+	/* On Linux, EINVAL is only returned if the base is not valid */
+ if (errno == EINVAL) {
+ if (base == 0 || (base >1 && base <37)) {
+			/* Base was ok, so it's because we were not
+			 * able to make the conversion.
+			 * Let's reset errno.
+			 */
+ errno = 0;
+ }
+ }
+ return nb;
+}
+#else
+#error "You need the strtoq function"
+#endif /* HAVE_STRTOQ */
+#endif /* HAVE_BSD_STRTOLL */
+#endif /* HAVE_STRTOLL */
+
+
+#ifndef HAVE_STRTOULL
+unsigned long long int rep_strtoull(const char *str, char **endptr, int base)
+{
+#ifdef HAVE_STRTOUQ
+ return strtouq(str, endptr, base);
+#elif defined(HAVE___STRTOULL)
+ return __strtoull(str, endptr, base);
+#elif SIZEOF_LONG == SIZEOF_LONG_LONG
+ return (unsigned long long int) strtoul(str, endptr, base);
+#else
+# error "You need a strtoull function"
+#endif
+}
+#else
+#ifdef HAVE_BSD_STRTOLL
+#ifdef HAVE_STRTOUQ
+unsigned long long int rep_strtoull(const char *str, char **endptr, int base)
+{
+ unsigned long long int nb = strtouq(str, endptr, base);
+	/* On Linux, EINVAL is only returned if the base is not valid */
+ if (errno == EINVAL) {
+ if (base == 0 || (base >1 && base <37)) {
+			/* Base was ok, so it's because we were not
+			 * able to make the conversion.
+			 * Let's reset errno.
+			 */
+ errno = 0;
+ }
+ }
+ return nb;
+}
+#else
+#error "You need the strtouq function"
+#endif /* HAVE_STRTOUQ */
+#endif /* HAVE_BSD_STRTOLL */
+#endif /* HAVE_STRTOULL */
+
+#ifndef HAVE_SETENV
+int rep_setenv(const char *name, const char *value, int overwrite)
+{
+ char *p;
+ size_t l1, l2;
+ int ret;
+
+ if (!overwrite && getenv(name)) {
+ return 0;
+ }
+
+ l1 = strlen(name);
+ l2 = strlen(value);
+
+ p = malloc(l1+l2+2);
+ if (p == NULL) {
+ return -1;
+ }
+ memcpy(p, name, l1);
+ p[l1] = '=';
+ memcpy(p+l1+1, value, l2);
+ p[l1+l2+1] = 0;
+
+ ret = putenv(p);
+ if (ret != 0) {
+ free(p);
+ }
+
+ return ret;
+}
+#endif
+
+#ifndef HAVE_UNSETENV
+int rep_unsetenv(const char *name)
+{
+ extern char **environ;
+ size_t len = strlen(name);
+ size_t i, count;
+
+ if (environ == NULL || getenv(name) == NULL) {
+ return 0;
+ }
+
+ for (i=0;environ[i];i++) /* noop */ ;
+
+ count=i;
+
+ for (i=0;i<count;) {
+ if (strncmp(environ[i], name, len) == 0 && environ[i][len] == '=') {
+ /* note: we do _not_ free the old variable here. It is unsafe to
+ do so, as the pointer may not have come from malloc */
+ memmove(&environ[i], &environ[i+1], (count-i)*sizeof(char *));
+ count--;
+ } else {
+ i++;
+ }
+ }
+
+ return 0;
+}
+#endif
+
+#ifndef HAVE_UTIME
+int rep_utime(const char *filename, const struct utimbuf *buf)
+{
+ errno = ENOSYS;
+ return -1;
+}
+#endif
+
+#ifndef HAVE_UTIMES
+int rep_utimes(const char *filename, const struct timeval tv[2])
+{
+ struct utimbuf u;
+
+ u.actime = tv[0].tv_sec;
+ if (tv[0].tv_usec > 500000) {
+ u.actime += 1;
+ }
+
+ u.modtime = tv[1].tv_sec;
+ if (tv[1].tv_usec > 500000) {
+ u.modtime += 1;
+ }
+
+ return utime(filename, &u);
+}
+#endif
+
+#ifndef HAVE_DUP2
+int rep_dup2(int oldfd, int newfd)
+{
+ errno = ENOSYS;
+ return -1;
+}
+#endif
+
+#ifndef HAVE_CHOWN
+/**
+chown isn't used much but OS/2 doesn't have it
+**/
+int rep_chown(const char *fname, uid_t uid, gid_t gid)
+{
+ errno = ENOSYS;
+ return -1;
+}
+#endif
+
+#ifndef HAVE_LINK
+int rep_link(const char *oldpath, const char *newpath)
+{
+ errno = ENOSYS;
+ return -1;
+}
+#endif
+
+#ifndef HAVE_READLINK
+int rep_readlink(const char *path, char *buf, size_t bufsiz)
+{
+ errno = ENOSYS;
+ return -1;
+}
+#endif
+
+#ifndef HAVE_SYMLINK
+int rep_symlink(const char *oldpath, const char *newpath)
+{
+ errno = ENOSYS;
+ return -1;
+}
+#endif
+
+#ifndef HAVE_LCHOWN
+int rep_lchown(const char *fname,uid_t uid,gid_t gid)
+{
+ errno = ENOSYS;
+ return -1;
+}
+#endif
+
+#ifndef HAVE_REALPATH
+char *rep_realpath(const char *path, char *resolved_path)
+{
+ /* As realpath is not a system call we can't return ENOSYS. */
+ errno = EINVAL;
+ return NULL;
+}
+#endif
+
+
+#ifndef HAVE_MEMMEM
+void *rep_memmem(const void *haystack, size_t haystacklen,
+ const void *needle, size_t needlelen)
+{
+ if (needlelen == 0) {
+ return discard_const(haystack);
+ }
+ while (haystacklen >= needlelen) {
+ char *p = (char *)memchr(haystack, *(const char *)needle,
+ haystacklen-(needlelen-1));
+ if (!p) return NULL;
+ if (memcmp(p, needle, needlelen) == 0) {
+ return p;
+ }
+		haystacklen -= (p - (const char *)haystack) + 1;
+		haystack = p+1;
+ }
+ return NULL;
+}
+#endif
+
+#if !defined(HAVE_VDPRINTF) || !defined(HAVE_C99_VSNPRINTF)
+int rep_vdprintf(int fd, const char *format, va_list ap)
+{
+ char *s = NULL;
+ int ret;
+
+ vasprintf(&s, format, ap);
+ if (s == NULL) {
+ errno = ENOMEM;
+ return -1;
+ }
+ ret = write(fd, s, strlen(s));
+ free(s);
+ return ret;
+}
+#endif
+
+#if !defined(HAVE_DPRINTF) || !defined(HAVE_C99_VSNPRINTF)
+int rep_dprintf(int fd, const char *format, ...)
+{
+ int ret;
+ va_list ap;
+
+ va_start(ap, format);
+ ret = vdprintf(fd, format, ap);
+ va_end(ap);
+
+ return ret;
+}
+#endif
+
+#ifndef HAVE_GET_CURRENT_DIR_NAME
+char *rep_get_current_dir_name(void)
+{
+ char buf[PATH_MAX+1];
+ char *p;
+ p = getcwd(buf, sizeof(buf));
+ if (p == NULL) {
+ return NULL;
+ }
+ return strdup(p);
+}
+#endif
+
+#ifndef HAVE_STRERROR_R
+int rep_strerror_r(int errnum, char *buf, size_t buflen)
+{
+ char *s = strerror(errnum);
+ if (strlen(s)+1 > buflen) {
+ errno = ERANGE;
+ return -1;
+ }
+ strncpy(buf, s, buflen);
+ return 0;
+}
+#endif
+
+#ifndef HAVE_CLOCK_GETTIME
+int rep_clock_gettime(clockid_t clk_id, struct timespec *tp)
+{
+ struct timeval tval;
+ switch (clk_id) {
+ case 0: /* CLOCK_REALTIME :*/
+#ifdef HAVE_GETTIMEOFDAY_TZ
+ gettimeofday(&tval,NULL);
+#else
+ gettimeofday(&tval);
+#endif
+ tp->tv_sec = tval.tv_sec;
+ tp->tv_nsec = tval.tv_usec * 1000;
+ break;
+ default:
+ errno = EINVAL;
+ return -1;
+ }
+ return 0;
+}
+#endif
+
+#ifndef HAVE_MEMALIGN
+void *rep_memalign( size_t align, size_t size )
+{
+#if defined(HAVE_POSIX_MEMALIGN)
+ void *p = NULL;
+ int ret = posix_memalign( &p, align, size );
+ if ( ret == 0 )
+ return p;
+
+ return NULL;
+#else
+	/* On *BSD systems memalign doesn't exist, but memory will
+ * be aligned on allocations of > pagesize. */
+#if defined(SYSCONF_SC_PAGESIZE)
+ size_t pagesize = (size_t)sysconf(_SC_PAGESIZE);
+#elif defined(HAVE_GETPAGESIZE)
+ size_t pagesize = (size_t)getpagesize();
+#else
+ size_t pagesize = (size_t)-1;
+#endif
+ if (pagesize == (size_t)-1) {
+ errno = ENOSYS;
+ return NULL;
+ }
+ if (size < pagesize) {
+ size = pagesize;
+ }
+ return malloc(size);
+#endif
+}
+#endif
+
+#ifndef HAVE_GETPEEREID
+int rep_getpeereid(int s, uid_t *uid, gid_t *gid)
+{
+#if defined(HAVE_PEERCRED)
+ struct ucred cred;
+ socklen_t cred_len = sizeof(struct ucred);
+ int ret;
+
+#undef getsockopt
+ ret = getsockopt(s, SOL_SOCKET, SO_PEERCRED, (void *)&cred, &cred_len);
+ if (ret != 0) {
+ return -1;
+ }
+
+ if (cred_len != sizeof(struct ucred)) {
+ errno = EINVAL;
+ return -1;
+ }
+
+ *uid = cred.uid;
+ *gid = cred.gid;
+ return 0;
+#else
+ errno = ENOSYS;
+ return -1;
+#endif
+}
+#endif
+
+#ifndef HAVE_USLEEP
+int rep_usleep(useconds_t usecs)
+{
+	struct timeval tval;
+	/*
+	 * Fake it with select...
+	 */
+	tval.tv_sec = usecs / 1000000;
+	tval.tv_usec = usecs % 1000000;
+ select(0,NULL,NULL,NULL,&tval);
+ return 0;
+}
+#endif /* HAVE_USLEEP */
+
+#ifndef HAVE_SETPROCTITLE
+void rep_setproctitle(const char *fmt, ...)
+{
+}
+#endif
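
Several of the replacements above (strlcpy, strlcat) share the OpenBSD return convention: the length of the string the call tried to create, so truncation is detected by comparing the return value against the buffer size. A short sketch, assuming a libc or libreplace build that provides the functions:

#include <stdio.h>
#include <string.h>

int main(void)
{
	char buf[8];
	size_t n;

	/* returns strlen(src); >= sizeof(buf) means truncation */
	n = strlcpy(buf, "truncate me", sizeof(buf));
	if (n >= sizeof(buf)) {
		printf("truncated to \"%s\" (needed %zu bytes)\n",
		       buf, n + 1);
	}

	/* strlcat reports the total length it tried to create */
	n = strlcat(buf, "!", sizeof(buf));
	return n >= sizeof(buf);
}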
diff --git a/lib/replace/replace.h b/lib/replace/replace.h
new file mode 100644
index 0000000..3ff4e36
--- /dev/null
+++ b/lib/replace/replace.h
@@ -0,0 +1,916 @@
+/*
+ Unix SMB/CIFS implementation.
+
+ macros to go along with the lib/replace/ portability layer code
+
+ Copyright (C) Andrew Tridgell 2005
+ Copyright (C) Jelmer Vernooij 2006-2008
+ Copyright (C) Jeremy Allison 2007.
+
+ ** NOTE! The following LGPL license applies to the replace
+ ** library. This does NOT imply that all of Samba is released
+ ** under the LGPL
+
+ This library is free software; you can redistribute it and/or
+ modify it under the terms of the GNU Lesser General Public
+ License as published by the Free Software Foundation; either
+ version 3 of the License, or (at your option) any later version.
+
+ This library is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ Lesser General Public License for more details.
+
+ You should have received a copy of the GNU Lesser General Public
+ License along with this library; if not, see <http://www.gnu.org/licenses/>.
+*/
+
+#ifndef _LIBREPLACE_REPLACE_H
+#define _LIBREPLACE_REPLACE_H
+
+#ifndef NO_CONFIG_H
+#include "config.h"
+#endif
+
+#ifdef HAVE_STANDARDS_H
+#include <standards.h>
+#endif
+
+#include <stdio.h>
+#include <stdlib.h>
+#include <stdarg.h>
+#include <errno.h>
+
+#ifndef HAVE_DECL_EWOULDBLOCK
+#define EWOULDBLOCK EAGAIN
+#endif
+
+#if defined(_MSC_VER) || defined(__MINGW32__)
+#include "win32_replace.h"
+#endif
+
+
+#ifdef HAVE_INTTYPES_H
+#define __STDC_FORMAT_MACROS
+#include <inttypes.h>
+#elif HAVE_STDINT_H
+#include <stdint.h>
+/* force off HAVE_INTTYPES_H so that roken doesn't try to include both,
+ which causes a warning storm on irix */
+#undef HAVE_INTTYPES_H
+#endif
+
+#ifdef HAVE_MALLOC_H
+#include <malloc.h>
+#endif
+
+#ifndef __PRI64_PREFIX
+# if __WORDSIZE == 64 && ! defined __APPLE__
+# define __PRI64_PREFIX "l"
+# else
+# define __PRI64_PREFIX "ll"
+# endif
+#endif
+
+/* Decimal notation. */
+#ifndef PRId8
+# define PRId8 "d"
+#endif
+#ifndef PRId16
+# define PRId16 "d"
+#endif
+#ifndef PRId32
+# define PRId32 "d"
+#endif
+#ifndef PRId64
+# define PRId64 __PRI64_PREFIX "d"
+#endif
+
+#ifndef PRIi8
+# define PRIi8 "i"
+#endif
+#ifndef PRIi16
+# define PRIi16 "i"
+#endif
+#ifndef PRIi32
+# define PRIi32 "i"
+#endif
+#ifndef PRIi64
+# define PRIi64 __PRI64_PREFIX "i"
+#endif
+
+#ifndef PRIu8
+# define PRIu8 "u"
+#endif
+#ifndef PRIu16
+# define PRIu16 "u"
+#endif
+#ifndef PRIu32
+# define PRIu32 "u"
+#endif
+#ifndef PRIu64
+# define PRIu64 __PRI64_PREFIX "u"
+#endif
+
+#ifndef SCNd8
+# define SCNd8 "hhd"
+#endif
+#ifndef SCNd16
+# define SCNd16 "hd"
+#endif
+#ifndef SCNd32
+# define SCNd32 "d"
+#endif
+#ifndef SCNd64
+# define SCNd64 __PRI64_PREFIX "d"
+#endif
+
+#ifndef SCNi8
+# define SCNi8 "hhi"
+#endif
+#ifndef SCNi16
+# define SCNi16 "hi"
+#endif
+#ifndef SCNi32
+# define SCNi32 "i"
+#endif
+#ifndef SCNi64
+# define SCNi64 __PRI64_PREFIX "i"
+#endif
+
+#ifndef SCNu8
+# define SCNu8 "hhu"
+#endif
+#ifndef SCNu16
+# define SCNu16 "hu"
+#endif
+#ifndef SCNu32
+# define SCNu32 "u"
+#endif
+#ifndef SCNu64
+# define SCNu64 __PRI64_PREFIX "u"
+#endif
+
+#ifdef HAVE_BSD_STRING_H
+#include <bsd/string.h>
+#endif
+
+#ifdef HAVE_BSD_UNISTD_H
+#include <bsd/unistd.h>
+#endif
+
+#ifdef HAVE_STRING_H
+#include <string.h>
+#endif
+
+#ifdef HAVE_STRINGS_H
+#include <strings.h>
+#endif
+
+#ifdef HAVE_SYS_TYPES_H
+#include <sys/types.h>
+#endif
+
+#ifdef HAVE_SETPROCTITLE_H
+#include <setproctitle.h>
+#endif
+
+#if STDC_HEADERS
+#include <stdlib.h>
+#include <stddef.h>
+#endif
+
+#ifdef HAVE_LINUX_TYPES_H
+/*
+ * This is needed as some broken header files require this to be included early
+ */
+#include <linux/types.h>
+#endif
+
+#ifndef HAVE_STRERROR
+extern char *sys_errlist[];
+#define strerror(i) sys_errlist[i]
+#endif
+
+#ifndef HAVE_ERRNO_DECL
+extern int errno;
+#endif
+
+#ifndef HAVE_STRDUP
+#define strdup rep_strdup
+char *rep_strdup(const char *s);
+#endif
+
+#ifndef HAVE_MEMMOVE
+#define memmove rep_memmove
+void *rep_memmove(void *dest,const void *src,int size);
+#endif
+
+#ifndef HAVE_MEMMEM
+#define memmem rep_memmem
+void *rep_memmem(const void *haystack, size_t haystacklen,
+ const void *needle, size_t needlelen);
+#endif
+
+#ifndef HAVE_MEMALIGN
+#define memalign rep_memalign
+void *rep_memalign(size_t boundary, size_t size);
+#endif
+
+#ifndef HAVE_MKTIME
+#define mktime rep_mktime
+/* prototype is in "system/time.h" */
+#endif
+
+#ifndef HAVE_TIMEGM
+#define timegm rep_timegm
+/* prototype is in "system/time.h" */
+#endif
+
+#ifndef HAVE_UTIME
+#define utime rep_utime
+/* prototype is in "system/time.h" */
+#endif
+
+#ifndef HAVE_UTIMES
+#define utimes rep_utimes
+/* prototype is in "system/time.h" */
+#endif
+
+#ifndef HAVE_STRLCPY
+#define strlcpy rep_strlcpy
+size_t rep_strlcpy(char *d, const char *s, size_t bufsize);
+#endif
+
+#ifndef HAVE_STRLCAT
+#define strlcat rep_strlcat
+size_t rep_strlcat(char *d, const char *s, size_t bufsize);
+#endif
+
+#if (defined(BROKEN_STRNDUP) || !defined(HAVE_STRNDUP))
+#undef HAVE_STRNDUP
+#define strndup rep_strndup
+char *rep_strndup(const char *s, size_t n);
+#endif
+
+#if (defined(BROKEN_STRNLEN) || !defined(HAVE_STRNLEN))
+#undef HAVE_STRNLEN
+#define strnlen rep_strnlen
+size_t rep_strnlen(const char *s, size_t n);
+#endif
+
+#if !HAVE_DECL_ENVIRON
+#ifdef __APPLE__
+#include <crt_externs.h>
+#define environ (*_NSGetEnviron())
+#else
+extern char **environ;
+#endif
+#endif
+
+#ifndef HAVE_SETENV
+#define setenv rep_setenv
+int rep_setenv(const char *name, const char *value, int overwrite);
+#else
+#ifndef HAVE_SETENV_DECL
+int setenv(const char *name, const char *value, int overwrite);
+#endif
+#endif
+
+#ifndef HAVE_UNSETENV
+#define unsetenv rep_unsetenv
+int rep_unsetenv(const char *name);
+#endif
+
+#ifndef HAVE_SETEUID
+#define seteuid rep_seteuid
+int rep_seteuid(uid_t);
+#endif
+
+#ifndef HAVE_SETEGID
+#define setegid rep_setegid
+int rep_setegid(gid_t);
+#endif
+
+#if (defined(USE_SETRESUID) && !defined(HAVE_SETRESUID_DECL))
+/* stupid glibc */
+int setresuid(uid_t ruid, uid_t euid, uid_t suid);
+#endif
+#if (defined(USE_SETRESUID) && !defined(HAVE_SETRESGID_DECL))
+int setresgid(gid_t rgid, gid_t egid, gid_t sgid);
+#endif
+
+#ifndef HAVE_CHOWN
+#define chown rep_chown
+int rep_chown(const char *path, uid_t uid, gid_t gid);
+#endif
+
+#ifndef HAVE_CHROOT
+#define chroot rep_chroot
+int rep_chroot(const char *dirname);
+#endif
+
+#ifndef HAVE_LINK
+#define link rep_link
+int rep_link(const char *oldpath, const char *newpath);
+#endif
+
+#ifndef HAVE_READLINK
+#define readlink rep_readlink
+ssize_t rep_readlink(const char *path, char *buf, size_t bufsize);
+#endif
+
+#ifndef HAVE_SYMLINK
+#define symlink rep_symlink
+int rep_symlink(const char *oldpath, const char *newpath);
+#endif
+
+#ifndef HAVE_REALPATH
+#define realpath rep_realpath
+char *rep_realpath(const char *path, char *resolved_path);
+#endif
+
+#ifndef HAVE_LCHOWN
+#define lchown rep_lchown
+int rep_lchown(const char *fname,uid_t uid,gid_t gid);
+#endif
+
+#ifdef HAVE_UNIX_H
+#include <unix.h>
+#endif
+
+#ifndef HAVE_SETLINEBUF
+#define setlinebuf rep_setlinebuf
+void rep_setlinebuf(FILE *);
+#endif
+
+#ifndef HAVE_STRCASESTR
+#define strcasestr rep_strcasestr
+char *rep_strcasestr(const char *haystack, const char *needle);
+#endif
+
+#ifndef HAVE_STRTOK_R
+#define strtok_r rep_strtok_r
+char *rep_strtok_r(char *s, const char *delim, char **save_ptr);
+#endif
+
+
+
+#ifndef HAVE_STRTOLL
+#define strtoll rep_strtoll
+long long int rep_strtoll(const char *str, char **endptr, int base);
+#else
+#ifdef HAVE_BSD_STRTOLL
+#define strtoll rep_strtoll
+long long int rep_strtoll(const char *str, char **endptr, int base);
+#endif
+#endif
+
+#ifndef HAVE_STRTOULL
+#define strtoull rep_strtoull
+unsigned long long int rep_strtoull(const char *str, char **endptr, int base);
+#else
+#ifdef HAVE_BSD_STRTOLL /* yes, it's not HAVE_BSD_STRTOULL */
+#define strtoull rep_strtoull
+unsigned long long int rep_strtoull(const char *str, char **endptr, int base);
+#endif
+#endif
+
+#ifndef HAVE_FTRUNCATE
+#define ftruncate rep_ftruncate
+int rep_ftruncate(int,off_t);
+#endif
+
+#ifndef HAVE_INITGROUPS
+#define initgroups rep_initgroups
+int rep_initgroups(char *name, gid_t id);
+#endif
+
+#if !defined(HAVE_BZERO) && defined(HAVE_MEMSET)
+#define bzero(a,b) memset((a),'\0',(b))
+#endif
+
+#ifndef HAVE_DLERROR
+#define dlerror rep_dlerror
+char *rep_dlerror(void);
+#endif
+
+#ifndef HAVE_DLOPEN
+#define dlopen rep_dlopen
+#ifdef DLOPEN_TAKES_UNSIGNED_FLAGS
+void *rep_dlopen(const char *name, unsigned int flags);
+#else
+void *rep_dlopen(const char *name, int flags);
+#endif
+#endif
+
+#ifndef HAVE_DLSYM
+#define dlsym rep_dlsym
+void *rep_dlsym(void *handle, const char *symbol);
+#endif
+
+#ifndef HAVE_DLCLOSE
+#define dlclose rep_dlclose
+int rep_dlclose(void *handle);
+#endif
+
+#ifndef HAVE_SOCKETPAIR
+#define socketpair rep_socketpair
+/* prototype is in system/network.h */
+#endif
+
+#ifndef PRINTF_ATTRIBUTE
+#if (__GNUC__ > 3) || ((__GNUC__ == 3) && (__GNUC_MINOR__ >= 1))
+/** Use gcc attribute to check printf fns. a1 is the 1-based index of
+ * the parameter containing the format, and a2 the index of the first
+ * argument. Note that some gcc 2.x versions don't handle this
+ * properly **/
+#define PRINTF_ATTRIBUTE(a1, a2) __attribute__ ((format (__printf__, a1, a2)))
+#else
+#define PRINTF_ATTRIBUTE(a1, a2)
+#endif
+#endif
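+
+/* Illustrative use (not part of the original header): declaring
+ *
+ *	void my_log(int level, const char *fmt, ...) PRINTF_ATTRIBUTE(2,3);
+ *
+ * tells gcc that argument 2 is the format string and that checking of
+ * the variadic arguments starts at argument 3, so a call such as
+ * my_log(1, "%s", 42) is diagnosed at compile time. my_log is a
+ * hypothetical example, not a libreplace symbol. */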
+
+#ifndef _DEPRECATED_
+#if (__GNUC__ > 3) || ((__GNUC__ == 3) && (__GNUC_MINOR__ >= 1))
+#define _DEPRECATED_ __attribute__ ((deprecated))
+#else
+#define _DEPRECATED_
+#endif
+#endif
+
+#if !defined(HAVE_VDPRINTF) || !defined(HAVE_C99_VSNPRINTF)
+#define vdprintf rep_vdprintf
+int rep_vdprintf(int fd, const char *format, va_list ap) PRINTF_ATTRIBUTE(2,0);
+#endif
+
+#if !defined(HAVE_DPRINTF) || !defined(HAVE_C99_VSNPRINTF)
+#define dprintf rep_dprintf
+int rep_dprintf(int fd, const char *format, ...) PRINTF_ATTRIBUTE(2,3);
+#endif
+
+#if !defined(HAVE_VASPRINTF) || !defined(HAVE_C99_VSNPRINTF)
+#define vasprintf rep_vasprintf
+int rep_vasprintf(char **ptr, const char *format, va_list ap) PRINTF_ATTRIBUTE(2,0);
+#endif
+
+#if !defined(HAVE_SNPRINTF) || !defined(HAVE_C99_VSNPRINTF)
+#define snprintf rep_snprintf
+int rep_snprintf(char *,size_t ,const char *, ...) PRINTF_ATTRIBUTE(3,4);
+#endif
+
+#if !defined(HAVE_VSNPRINTF) || !defined(HAVE_C99_VSNPRINTF)
+#define vsnprintf rep_vsnprintf
+int rep_vsnprintf(char *,size_t ,const char *, va_list ap) PRINTF_ATTRIBUTE(3,0);
+#endif
+
+#if !defined(HAVE_ASPRINTF) || !defined(HAVE_C99_VSNPRINTF)
+#define asprintf rep_asprintf
+int rep_asprintf(char **,const char *, ...) PRINTF_ATTRIBUTE(2,3);
+#endif
+
+#if !defined(HAVE_C99_VSNPRINTF)
+#ifdef REPLACE_BROKEN_PRINTF
+/*
+ * We do not redefine printf by default
+ * as it breaks the build if system headers
+ * use __attribute__((format(printf, 3, 0)))
+ * instead of __attribute__((format(__printf__, 3, 0)))
+ */
+#define printf rep_printf
+#endif
+int rep_printf(const char *, ...) PRINTF_ATTRIBUTE(1,2);
+#endif
+
+#if !defined(HAVE_C99_VSNPRINTF)
+#define fprintf rep_fprintf
+int rep_fprintf(FILE *stream, const char *, ...) PRINTF_ATTRIBUTE(2,3);
+#endif
+
+#ifndef HAVE_VSYSLOG
+#ifdef HAVE_SYSLOG
+#define vsyslog rep_vsyslog
+void rep_vsyslog (int facility_priority, const char *format, va_list arglist) PRINTF_ATTRIBUTE(2,0);
+#endif
+#endif
+
+/* we used to use these fns, but now we have good replacements
+ for snprintf and vsnprintf */
+#define slprintf snprintf
+
+
+#ifndef HAVE_VA_COPY
+#undef va_copy
+#ifdef HAVE___VA_COPY
+#define va_copy(dest, src) __va_copy(dest, src)
+#else
+#define va_copy(dest, src) (dest) = (src)
+#endif
+#endif
+
+#ifndef HAVE_VOLATILE
+#define volatile
+#endif
+
+#ifndef HAVE_COMPARISON_FN_T
+typedef int (*comparison_fn_t)(const void *, const void *);
+#endif
+
+#ifndef HAVE_WORKING_STRPTIME
+#define strptime rep_strptime
+struct tm;
+char *rep_strptime(const char *buf, const char *format, struct tm *tm);
+#endif
+
+#ifndef HAVE_DUP2
+#define dup2 rep_dup2
+int rep_dup2(int oldfd, int newfd);
+#endif
+
+/* Load header file for dynamic linking stuff */
+#ifdef HAVE_DLFCN_H
+#include <dlfcn.h>
+#endif
+
+#ifndef RTLD_LAZY
+#define RTLD_LAZY 0
+#endif
+#ifndef RTLD_NOW
+#define RTLD_NOW 0
+#endif
+#ifndef RTLD_GLOBAL
+#define RTLD_GLOBAL 0
+#endif
+
+#ifndef HAVE_SECURE_MKSTEMP
+#define mkstemp(path) rep_mkstemp(path)
+int rep_mkstemp(char *temp);
+#endif
+
+#ifndef HAVE_MKDTEMP
+#define mkdtemp rep_mkdtemp
+char *rep_mkdtemp(char *template);
+#endif
+
+#ifndef HAVE_PREAD
+#define pread rep_pread
+ssize_t rep_pread(int __fd, void *__buf, size_t __nbytes, off_t __offset);
+#define LIBREPLACE_PREAD_REPLACED 1
+#else
+#define LIBREPLACE_PREAD_NOT_REPLACED 1
+#endif
+
+#ifndef HAVE_PWRITE
+#define pwrite rep_pwrite
+ssize_t rep_pwrite(int __fd, const void *__buf, size_t __nbytes, off_t __offset);
+#define LIBREPLACE_PWRITE_REPLACED 1
+#else
+#define LIBREPLACE_PWRITE_NOT_REPLACED 1
+#endif
+
+#if !defined(HAVE_INET_NTOA) || defined(REPLACE_INET_NTOA)
+#define inet_ntoa rep_inet_ntoa
+/* prototype is in "system/network.h" */
+#endif
+
+#ifndef HAVE_INET_PTON
+#define inet_pton rep_inet_pton
+/* prototype is in "system/network.h" */
+#endif
+
+#ifndef HAVE_INET_NTOP
+#define inet_ntop rep_inet_ntop
+/* prototype is in "system/network.h" */
+#endif
+
+#ifndef HAVE_INET_ATON
+#define inet_aton rep_inet_aton
+/* prototype is in "system/network.h" */
+#endif
+
+#ifndef HAVE_CONNECT
+#define connect rep_connect
+/* prototype is in "system/network.h" */
+#endif
+
+#ifndef HAVE_GETHOSTBYNAME
+#define gethostbyname rep_gethostbyname
+/* prototype is in "system/network.h" */
+#endif
+
+#ifndef HAVE_GETIFADDRS
+#define getifaddrs rep_getifaddrs
+/* prototype is in "system/network.h" */
+#endif
+
+#ifndef HAVE_FREEIFADDRS
+#define freeifaddrs rep_freeifaddrs
+/* prototype is in "system/network.h" */
+#endif
+
+#ifndef HAVE_GET_CURRENT_DIR_NAME
+#define get_current_dir_name rep_get_current_dir_name
+char *rep_get_current_dir_name(void);
+#endif
+
+#ifndef HAVE_STRERROR_R
+#define strerror_r rep_strerror_r
+int rep_strerror_r(int errnum, char *buf, size_t buflen);
+#endif
+
+#if !defined(HAVE_CLOCK_GETTIME)
+#define clock_gettime rep_clock_gettime
+#endif
+
+#ifdef HAVE_LIMITS_H
+#include <limits.h>
+#endif
+
+#ifdef HAVE_SYS_PARAM_H
+#include <sys/param.h>
+#endif
+
+/* The extra casts work around common compiler bugs. */
+#define _TYPE_SIGNED(t) (! ((t) 0 < (t) -1))
+/* The outer cast is needed to work around a bug in Cray C 5.0.3.0.
+ It is necessary at least when t == time_t. */
+#define _TYPE_MINIMUM(t) ((t) (_TYPE_SIGNED (t) \
+ ? ~ (t) 0 << (sizeof (t) * CHAR_BIT - 1) : (t) 0))
+#define _TYPE_MAXIMUM(t) ((t) (~ (t) 0 - _TYPE_MINIMUM (t)))
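+
+/*
+ * Illustrative use (editor's sketch): these macros derive integer
+ * limits purely from the type, e.g. for a time_t whose width is not
+ * known at configure time:
+ *
+ *   time_t t_max = _TYPE_MAXIMUM(time_t);
+ *   time_t t_min = _TYPE_MINIMUM(time_t);
+ */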
+
+#ifndef UINT16_MAX
+#define UINT16_MAX 65535
+#endif
+
+#ifndef UINT32_MAX
+#define UINT32_MAX (4294967295U)
+#endif
+
+#ifndef UINT64_MAX
+#define UINT64_MAX ((uint64_t)-1)
+#endif
+
+#ifndef INT64_MAX
+#define INT64_MAX 9223372036854775807LL
+#endif
+
+#ifndef CHAR_BIT
+#define CHAR_BIT 8
+#endif
+
+#ifndef INT32_MAX
+#define INT32_MAX _TYPE_MAXIMUM(int32_t)
+#endif
+
+#ifdef HAVE_STDBOOL_H
+#include <stdbool.h>
+#endif
+
+#if !defined(HAVE_BOOL)
+#ifdef HAVE__Bool
+#define bool _Bool
+#else
+typedef int bool;
+#endif
+#endif
+
+#if !defined(HAVE_INTPTR_T)
+typedef long long intptr_t;
+#endif
+
+#if !defined(HAVE_UINTPTR_T)
+typedef unsigned long long uintptr_t;
+#endif
+
+#if !defined(HAVE_PTRDIFF_T)
+/* ptrdiff_t must be a signed type */
+typedef long long ptrdiff_t;
+#endif
+
+/*
+ * to prevent <rpcsvc/yp_prot.h> from doing a redefine of 'bool'
+ *
+ * IRIX, HPUX, MacOS 10 and Solaris need BOOL_DEFINED
+ * Tru64 needs _BOOL_EXISTS
+ * AIX needs _BOOL,_TRUE,_FALSE
+ */
+#ifndef BOOL_DEFINED
+#define BOOL_DEFINED
+#endif
+#ifndef _BOOL_EXISTS
+#define _BOOL_EXISTS
+#endif
+#ifndef _BOOL
+#define _BOOL
+#endif
+
+#ifndef __bool_true_false_are_defined
+#define __bool_true_false_are_defined
+#endif
+
+#ifndef true
+#define true (1)
+#endif
+#ifndef false
+#define false (0)
+#endif
+
+#ifndef _TRUE
+#define _TRUE true
+#endif
+#ifndef _FALSE
+#define _FALSE false
+#endif
+
+#ifndef HAVE_FUNCTION_MACRO
+#ifdef HAVE_func_MACRO
+#define __FUNCTION__ __func__
+#else
+#define __FUNCTION__ ("")
+#endif
+#endif
+
+
+#ifndef MIN
+#define MIN(a,b) ((a)<(b)?(a):(b))
+#endif
+
+#ifndef MAX
+#define MAX(a,b) ((a)>(b)?(a):(b))
+#endif
+
+/**
+ this is a warning hack. The idea is to use this everywhere that we
+ get the "discarding const" warning from gcc. That doesn't actually
+ fix the problem of course, but it means that when we do get to
+ cleaning them up we can do it by searching the code for
+ discard_const.
+
+ It also means that other error types aren't as swamped by the noise
+ of hundreds of const warnings, so we are more likely to notice when
+ we get new errors.
+
+ Please only add more uses of this macro when you find it
+ _really_ hard to fix const warnings. Our aim is to eventually use
+ this function in only a very few places.
+
+ Also, please call this via the discard_const_p() macro interface, as that
+ makes the return type safe.
+*/
+#define discard_const(ptr) ((void *)((uintptr_t)(ptr)))
+
+/** Type-safe version of discard_const */
+#define discard_const_p(type, ptr) ((type *)discard_const(ptr))
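+
+/*
+ * Illustrative use (editor's sketch; legacy_fn is a made-up callee):
+ * pass a const string to an old API that takes a non-const pointer
+ * but never writes through it:
+ *
+ *   const char *name = "example";
+ *   legacy_fn(discard_const_p(char, name));
+ */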
+
+#ifndef __STRING
+#define __STRING(x) #x
+#endif
+
+#ifndef __STRINGSTRING
+#define __STRINGSTRING(x) __STRING(x)
+#endif
+
+#ifndef __LINESTR__
+#define __LINESTR__ __STRINGSTRING(__LINE__)
+#endif
+
+#ifndef __location__
+#define __location__ __FILE__ ":" __LINESTR__
+#endif
+
+/**
+ * zero a structure
+ */
+#define ZERO_STRUCT(x) memset((char *)&(x), 0, sizeof(x))
+
+/**
+ * zero a structure given a pointer to the structure
+ */
+#define ZERO_STRUCTP(x) do { if ((x) != NULL) memset((char *)(x), 0, sizeof(*(x))); } while(0)
+
+/**
+ * zero a structure given a pointer to the structure - no zero check
+ */
+#define ZERO_STRUCTPN(x) memset((char *)(x), 0, sizeof(*(x)))
+
+/* zero an array - note that sizeof(array) must work - ie. it must not be a
+ pointer */
+#define ZERO_ARRAY(x) memset((char *)(x), 0, sizeof(x))
+
+/**
+ * work out how many elements there are in a static array
+ */
+#define ARRAY_SIZE(a) (sizeof(a)/sizeof(a[0]))
+
+/**
+ * pointer difference macro
+ */
+#define PTR_DIFF(p1,p2) ((ptrdiff_t)(((const char *)(p1)) - (const char *)(p2)))
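+
+/*
+ * Illustrative sketch (editor's example) of the macros above:
+ *
+ *   struct foo { int a; char b[16]; } f;
+ *   struct foo arr[4];
+ *   ZERO_STRUCT(f);                              - clears f
+ *   ZERO_ARRAY(arr);                             - arr is a real array
+ *   size_t n = ARRAY_SIZE(arr);                  - yields 4
+ *   ptrdiff_t off = PTR_DIFF(&arr[2], &arr[0]);  - byte offset
+ */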
+
+#if MMAP_BLACKLIST
+#undef HAVE_MMAP
+#endif
+
+#ifdef __COMPAR_FN_T
+#define QSORT_CAST (__compar_fn_t)
+#endif
+
+#ifndef QSORT_CAST
+#define QSORT_CAST (int (*)(const void *, const void *))
+#endif
+
+#ifndef PATH_MAX
+#define PATH_MAX 1024
+#endif
+
+#ifndef MAX_DNS_NAME_LENGTH
+#define MAX_DNS_NAME_LENGTH 256 /* Actually 255 but +1 for terminating null. */
+#endif
+
+#ifndef HAVE_CRYPT
+char *ufc_crypt(const char *key, const char *salt);
+#define crypt ufc_crypt
+#else
+#ifdef HAVE_CRYPT_H
+#include <crypt.h>
+#endif
+#endif
+
+/* these macros gain us a few percent of speed on gcc */
+#if (__GNUC__ >= 3)
+/* the strange !! is to ensure that __builtin_expect() takes either 0 or 1
+ as its first argument */
+#ifndef likely
+#define likely(x) __builtin_expect(!!(x), 1)
+#endif
+#ifndef unlikely
+#define unlikely(x) __builtin_expect(!!(x), 0)
+#endif
+#else
+#ifndef likely
+#define likely(x) (x)
+#endif
+#ifndef unlikely
+#define unlikely(x) (x)
+#endif
+#endif
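+
+/*
+ * Illustrative use (editor's sketch): annotate the branch that is
+ * almost never taken so gcc favours the fall-through path:
+ *
+ *   if (unlikely(fd < 0)) {
+ *       return -1;        - rare error path
+ *   }
+ */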
+
+#ifndef HAVE_FDATASYNC
+#define fdatasync(fd) fsync(fd)
+#elif !defined(HAVE_DECL_FDATASYNC)
+int fdatasync(int);
+#endif
+
+/* these are used to mark symbols as local to a shared lib, or
+ * publicly available via the shared lib API */
+#ifndef _PUBLIC_
+#ifdef HAVE_VISIBILITY_ATTR
+#define _PUBLIC_ __attribute__((visibility("default")))
+#else
+#define _PUBLIC_
+#endif
+#endif
+
+#ifndef _PRIVATE_
+#ifdef HAVE_VISIBILITY_ATTR
+# define _PRIVATE_ __attribute__((visibility("hidden")))
+#else
+# define _PRIVATE_
+#endif
+#endif
+
+#ifndef HAVE_POLL
+#define poll rep_poll
+/* prototype is in "system/network.h" */
+#endif
+
+#ifndef HAVE_GETPEEREID
+#define getpeereid rep_getpeereid
+int rep_getpeereid(int s, uid_t *uid, gid_t *gid);
+#endif
+
+#ifndef HAVE_USLEEP
+#define usleep rep_usleep
+typedef long useconds_t;
+int usleep(useconds_t);
+#endif
+
+#ifndef HAVE_SETPROCTITLE
+#define setproctitle rep_setproctitle
+void rep_setproctitle(const char *fmt, ...) PRINTF_ATTRIBUTE(1, 2);
+#endif
+
+bool nss_wrapper_enabled(void);
+bool nss_wrapper_hosts_enabled(void);
+bool socket_wrapper_enabled(void);
+bool uid_wrapper_enabled(void);
+
+/* Needed for Solaris atomic_add_XX functions. */
+#if defined(HAVE_SYS_ATOMIC_H)
+#include <sys/atomic.h>
+#endif
+
+#endif /* _LIBREPLACE_REPLACE_H */
diff --git a/lib/replace/snprintf.c b/lib/replace/snprintf.c
new file mode 100644
index 0000000..86ba74c
--- /dev/null
+++ b/lib/replace/snprintf.c
@@ -0,0 +1,1534 @@
+/*
+ * NOTE: If you change this file, please merge it into rsync, samba, etc.
+ */
+
+/*
+ * Copyright Patrick Powell 1995
+ * This code is based on code written by Patrick Powell (papowell@astart.com)
+ * It may be used for any purpose as long as this notice remains intact
+ * on all source code distributions
+ */
+
+/**************************************************************
+ * Original:
+ * Patrick Powell Tue Apr 11 09:48:21 PDT 1995
+ * A bombproof version of doprnt (dopr) included.
+ * Sigh. This sort of thing is always nasty to deal with. Note that
+ * the version here does not include floating point...
+ *
+ * snprintf() is used instead of sprintf() as it does limit checks
+ * for string length. This covers a nasty loophole.
+ *
+ * The other functions are there to prevent NULL pointers from
+ * causing nasty effects.
+ *
+ * More Recently:
+ * Brandon Long <blong@fiction.net> 9/15/96 for mutt 0.43
+ * This was ugly. It is still ugly. I opted out of floating point
+ * numbers, but the formatter understands just about everything
+ * from the normal C string format, at least as far as I can tell from
+ * the Solaris 2.5 printf(3S) man page.
+ *
+ * Brandon Long <blong@fiction.net> 10/22/97 for mutt 0.87.1
+ * Ok, added some minimal floating point support, which means this
+ * probably requires libm on most operating systems. Don't yet
+ * support the exponent (e,E) and sigfig (g,G). Also, fmtint()
+ * was pretty badly broken, it just wasn't being exercised in ways
+ * which showed it, so that's been fixed. Also, formatted the code
+ * to mutt conventions, and removed dead code left over from the
+ * original. Also, there is now a builtin-test, just compile with:
+ * gcc -DTEST_SNPRINTF -o snprintf snprintf.c -lm
+ * and run snprintf for results.
+ *
+ * Thomas Roessler <roessler@guug.de> 01/27/98 for mutt 0.89i
+ * The PGP code was using unsigned hexadecimal formats.
+ * Unfortunately, unsigned formats simply didn't work.
+ *
+ * Michael Elkins <me@cs.hmc.edu> 03/05/98 for mutt 0.90.8
+ * The original code assumed that both snprintf() and vsnprintf() were
+ * missing. Some systems only have snprintf() but not vsnprintf(), so
+ * the code is now broken down under HAVE_SNPRINTF and HAVE_VSNPRINTF.
+ *
+ * Andrew Tridgell (tridge@samba.org) Oct 1998
+ * fixed handling of %.0f
+ * added test for HAVE_LONG_DOUBLE
+ *
+ * tridge@samba.org, idra@samba.org, April 2001
+ * got rid of fcvt code (twas buggy and made testing harder)
+ * added C99 semantics
+ *
+ * date: 2002/12/19 19:56:31; author: herb; state: Exp; lines: +2 -0
+ * actually print args for %g and %e
+ *
+ * date: 2002/06/03 13:37:52; author: jmcd; state: Exp; lines: +8 -0
+ * Since includes.h isn't included here, VA_COPY has to be defined here. I don't
+ * see any include file that is guaranteed to be here, so I'm defining it
+ * locally. Fixes AIX and Solaris builds.
+ *
+ * date: 2002/06/03 03:07:24; author: tridge; state: Exp; lines: +5 -13
+ * put the ifdef for HAVE_VA_COPY in one place rather than in lots of
+ * functions
+ *
+ * date: 2002/05/17 14:51:22; author: jmcd; state: Exp; lines: +21 -4
+ * Fix usage of va_list passed as an arg. Use __va_copy before using it
+ * when it exists.
+ *
+ * date: 2002/04/16 22:38:04; author: idra; state: Exp; lines: +20 -14
+ * Fix incorrect zpadlen handling in fmtfp.
+ * Thanks to Ollie Oldham <ollie.oldham@metro-optix.com> for spotting it.
+ * A few mods to make it easier to compile the tests.
+ * Added the "Ollie" test to the floating point ones.
+ *
+ * Martin Pool (mbp@samba.org) April 2003
+ * Remove NO_CONFIG_H so that the test case can be built within a source
+ * tree with less trouble.
+ * Remove unnecessary SAFE_FREE() definition.
+ *
+ * Martin Pool (mbp@samba.org) May 2003
+ * Put in a prototype for dummy_snprintf() to quiet compiler warnings.
+ *
+ * Move #endif to make sure VA_COPY, LDOUBLE, etc are defined even
+ * if the C library has some snprintf functions already.
+ *
+ * Darren Tucker (dtucker@zip.com.au) 2005
+ * Fix bug allowing read overruns of the source string with "%.*s"
+ * Usually harmless unless the read runs outside the process' allocation
+ * (eg if your malloc does guard pages) in which case it will segfault.
+ * From OpenSSH. Also added test for same.
+ *
+ * Simo Sorce (idra@samba.org) Jan 2006
+ *
+ * Add support for positional parameters ("%n$")
+ * Fix fmtstr so it conforms to sprintf wrt min/max
+ *
+ **************************************************************/
+
+#include "replace.h"
+#include "system/locale.h"
+
+#ifdef TEST_SNPRINTF /* need math library headers for testing */
+
+/* In test mode, we pretend that this system doesn't have any snprintf
+ * functions, regardless of what config.h says. */
+# undef HAVE_SNPRINTF
+# undef HAVE_VSNPRINTF
+# undef HAVE_C99_VSNPRINTF
+# undef HAVE_ASPRINTF
+# undef HAVE_VASPRINTF
+# include <math.h>
+#endif /* TEST_SNPRINTF */
+
+#if defined(HAVE_SNPRINTF) && defined(HAVE_VSNPRINTF) && defined(HAVE_C99_VSNPRINTF)
+/* only include stdio.h if we are not re-defining snprintf or vsnprintf */
+#include <stdio.h>
+ /* make the compiler happy with an empty file */
+ void dummy_snprintf(void);
+ void dummy_snprintf(void) {}
+#endif /* HAVE_SNPRINTF, etc */
+
+/* yes this really must be a ||. Don't muck with this (tridge) */
+#if !defined(HAVE_VSNPRINTF) || !defined(HAVE_C99_VSNPRINTF)
+
+#ifdef HAVE_LONG_DOUBLE
+#define LDOUBLE long double
+#else
+#define LDOUBLE double
+#endif
+
+#ifdef HAVE_LONG_LONG
+#define LLONG long long
+#else
+#define LLONG long
+#endif
+
+#ifndef VA_COPY
+#ifdef HAVE_VA_COPY
+#define VA_COPY(dest, src) va_copy(dest, src)
+#else
+#ifdef HAVE___VA_COPY
+#define VA_COPY(dest, src) __va_copy(dest, src)
+#else
+#define VA_COPY(dest, src) (dest) = (src)
+#endif
+#endif
+
+/*
+ * dopr(): poor man's version of doprintf
+ */
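+
+/*
+ * Overview (descriptive note): dopr() makes three passes.  Pass 1
+ * parses the format into a linked list of pr_chunk structs, one per
+ * literal run or conversion.  Pass 2 pulls the arguments off the
+ * va_list in parameter order, which is what makes positional
+ * parameters ("%2$s") possible.  Pass 3 renders the chunks into the
+ * output buffer.
+ */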
+
+/* format read states */
+#define DP_S_DEFAULT 0
+#define DP_S_FLAGS 1
+#define DP_S_MIN 2
+#define DP_S_DOT 3
+#define DP_S_MAX 4
+#define DP_S_MOD 5
+#define DP_S_CONV 6
+#define DP_S_DONE 7
+
+/* format flags - Bits */
+#define DP_F_MINUS (1 << 0)
+#define DP_F_PLUS (1 << 1)
+#define DP_F_SPACE (1 << 2)
+#define DP_F_NUM (1 << 3)
+#define DP_F_ZERO (1 << 4)
+#define DP_F_UP (1 << 5)
+#define DP_F_UNSIGNED (1 << 6)
+
+/* Conversion Flags */
+#define DP_C_CHAR 1
+#define DP_C_SHORT 2
+#define DP_C_LONG 3
+#define DP_C_LDOUBLE 4
+#define DP_C_LLONG 5
+#define DP_C_SIZET 6
+
+/* Chunk types */
+#define CNK_FMT_STR 0
+#define CNK_INT 1
+#define CNK_OCTAL 2
+#define CNK_UINT 3
+#define CNK_HEX 4
+#define CNK_FLOAT 5
+#define CNK_CHAR 6
+#define CNK_STRING 7
+#define CNK_PTR 8
+#define CNK_NUM 9
+#define CNK_PRCNT 10
+
+#define char_to_int(p) ((p)- '0')
+#ifndef MAX
+#define MAX(p,q) (((p) >= (q)) ? (p) : (q))
+#endif
+
+struct pr_chunk {
+ int type; /* chunk type */
+ int num; /* parameter number */
+ int min;
+ int max;
+ int flags;
+ int cflags;
+ int start;
+ int len;
+ LLONG value;
+ LDOUBLE fvalue;
+ char *strvalue;
+ void *pnum;
+ struct pr_chunk *min_star;
+ struct pr_chunk *max_star;
+ struct pr_chunk *next;
+};
+
+struct pr_chunk_x {
+ struct pr_chunk **chunks;
+ int num;
+};
+
+static int dopr(char *buffer, size_t maxlen, const char *format,
+ va_list args_in);
+static void fmtstr(char *buffer, size_t *currlen, size_t maxlen,
+ char *value, int flags, int min, int max);
+static void fmtint(char *buffer, size_t *currlen, size_t maxlen,
+ LLONG value, int base, int min, int max, int flags);
+static void fmtfp(char *buffer, size_t *currlen, size_t maxlen,
+ LDOUBLE fvalue, int min, int max, int flags);
+static void dopr_outch(char *buffer, size_t *currlen, size_t maxlen, char c);
+static struct pr_chunk *new_chunk(void);
+static int add_cnk_list_entry(struct pr_chunk_x **list,
+ int max_num, struct pr_chunk *chunk);
+
+static int dopr(char *buffer, size_t maxlen, const char *format, va_list args_in)
+{
+ char ch;
+ int state;
+ int pflag;
+ int pnum;
+ int pfirst;
+ size_t currlen;
+ va_list args;
+ const char *base;
+ struct pr_chunk *chunks = NULL;
+ struct pr_chunk *cnk = NULL;
+ struct pr_chunk_x *clist = NULL;
+ int max_pos;
+ int ret = -1;
+
+ VA_COPY(args, args_in);
+
+ state = DP_S_DEFAULT;
+ pfirst = 1;
+ pflag = 0;
+ pnum = 0;
+
+ max_pos = 0;
+ base = format;
+ ch = *format++;
+
+ /* retrieve the string structure as chunks */
+ while (state != DP_S_DONE) {
+ if (ch == '\0')
+ state = DP_S_DONE;
+
+ switch(state) {
+ case DP_S_DEFAULT:
+
+ if (cnk) {
+ cnk->next = new_chunk();
+ cnk = cnk->next;
+ } else {
+ cnk = new_chunk();
+ }
+ if (!cnk) goto done;
+ if (!chunks) chunks = cnk;
+
+ if (ch == '%') {
+ state = DP_S_FLAGS;
+ ch = *format++;
+ } else {
+ cnk->type = CNK_FMT_STR;
+ cnk->start = format - base -1;
+ while ((ch != '\0') && (ch != '%')) ch = *format++;
+ cnk->len = format - base - cnk->start -1;
+ }
+ break;
+ case DP_S_FLAGS:
+ switch (ch) {
+ case '-':
+ cnk->flags |= DP_F_MINUS;
+ ch = *format++;
+ break;
+ case '+':
+ cnk->flags |= DP_F_PLUS;
+ ch = *format++;
+ break;
+ case ' ':
+ cnk->flags |= DP_F_SPACE;
+ ch = *format++;
+ break;
+ case '#':
+ cnk->flags |= DP_F_NUM;
+ ch = *format++;
+ break;
+ case '0':
+ cnk->flags |= DP_F_ZERO;
+ ch = *format++;
+ break;
+ case 'I':
+ /* internationalization not supported yet */
+ ch = *format++;
+ break;
+ default:
+ state = DP_S_MIN;
+ break;
+ }
+ break;
+ case DP_S_MIN:
+ if (isdigit((unsigned char)ch)) {
+ cnk->min = 10 * cnk->min + char_to_int (ch);
+ ch = *format++;
+ } else if (ch == '$') {
+ if (!pfirst && !pflag) {
+ /* parameters must be all positioned or none */
+ goto done;
+ }
+ if (pfirst) {
+ pfirst = 0;
+ pflag = 1;
+ }
+ if (cnk->min == 0) /* what ?? */
+ goto done;
+ cnk->num = cnk->min;
+ cnk->min = 0;
+ ch = *format++;
+ } else if (ch == '*') {
+ if (pfirst) pfirst = 0;
+ cnk->min_star = new_chunk();
+ if (!cnk->min_star) /* out of memory :-( */
+ goto done;
+ cnk->min_star->type = CNK_INT;
+ if (pflag) {
+ int num;
+ ch = *format++;
+ if (!isdigit((unsigned char)ch)) {
+ /* parameters must be all positioned or none */
+ goto done;
+ }
+ for (num = 0; isdigit((unsigned char)ch); ch = *format++) {
+ num = 10 * num + char_to_int(ch);
+ }
+ cnk->min_star->num = num;
+ if (ch != '$') /* what ?? */
+ goto done;
+ } else {
+ cnk->min_star->num = ++pnum;
+ }
+ max_pos = add_cnk_list_entry(&clist, max_pos, cnk->min_star);
+ if (max_pos == 0) /* out of memory :-( */
+ goto done;
+ ch = *format++;
+ state = DP_S_DOT;
+ } else {
+ if (pfirst) pfirst = 0;
+ state = DP_S_DOT;
+ }
+ break;
+ case DP_S_DOT:
+ if (ch == '.') {
+ state = DP_S_MAX;
+ ch = *format++;
+ } else {
+ state = DP_S_MOD;
+ }
+ break;
+ case DP_S_MAX:
+ if (isdigit((unsigned char)ch)) {
+ if (cnk->max < 0)
+ cnk->max = 0;
+ cnk->max = 10 * cnk->max + char_to_int (ch);
+ ch = *format++;
+ } else if (ch == '$') {
+ if (!pfirst && !pflag) {
+ /* parameters must be all positioned or none */
+ goto done;
+ }
+ if (cnk->max <= 0) /* what ?? */
+ goto done;
+ cnk->num = cnk->max;
+ cnk->max = -1;
+ ch = *format++;
+ } else if (ch == '*') {
+ cnk->max_star = new_chunk();
+ if (!cnk->max_star) /* out of memory :-( */
+ goto done;
+ cnk->max_star->type = CNK_INT;
+ if (pflag) {
+ int num;
+ ch = *format++;
+ if (!isdigit((unsigned char)ch)) {
+ /* parameters must be all positioned or none */
+ goto done;
+ }
+ for (num = 0; isdigit((unsigned char)ch); ch = *format++) {
+ num = 10 * num + char_to_int(ch);
+ }
+ cnk->max_star->num = num;
+ if (ch != '$') /* what ?? */
+ goto done;
+ } else {
+ cnk->max_star->num = ++pnum;
+ }
+ max_pos = add_cnk_list_entry(&clist, max_pos, cnk->max_star);
+ if (max_pos == 0) /* out of memory :-( */
+ goto done;
+
+ ch = *format++;
+ state = DP_S_MOD;
+ } else {
+ state = DP_S_MOD;
+ }
+ break;
+ case DP_S_MOD:
+ switch (ch) {
+ case 'h':
+ cnk->cflags = DP_C_SHORT;
+ ch = *format++;
+ if (ch == 'h') {
+ cnk->cflags = DP_C_CHAR;
+ ch = *format++;
+ }
+ break;
+ case 'l':
+ cnk->cflags = DP_C_LONG;
+ ch = *format++;
+ if (ch == 'l') { /* It's a long long */
+ cnk->cflags = DP_C_LLONG;
+ ch = *format++;
+ }
+ break;
+ case 'j':
+ cnk->cflags = DP_C_LLONG;
+ ch = *format++;
+ break;
+ case 'L':
+ cnk->cflags = DP_C_LDOUBLE;
+ ch = *format++;
+ break;
+ case 'z':
+ cnk->cflags = DP_C_SIZET;
+ ch = *format++;
+ break;
+ default:
+ break;
+ }
+ state = DP_S_CONV;
+ break;
+ case DP_S_CONV:
+ if (cnk->num == 0) cnk->num = ++pnum;
+ max_pos = add_cnk_list_entry(&clist, max_pos, cnk);
+ if (max_pos == 0) /* out of memory :-( */
+ goto done;
+
+ switch (ch) {
+ case 'd':
+ case 'i':
+ cnk->type = CNK_INT;
+ break;
+ case 'o':
+ cnk->type = CNK_OCTAL;
+ cnk->flags |= DP_F_UNSIGNED;
+ break;
+ case 'u':
+ cnk->type = CNK_UINT;
+ cnk->flags |= DP_F_UNSIGNED;
+ break;
+ case 'X':
+ cnk->flags |= DP_F_UP;
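+				/* fall through */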
+ case 'x':
+ cnk->type = CNK_HEX;
+ cnk->flags |= DP_F_UNSIGNED;
+ break;
+ case 'A':
+ /* hex float not supported yet */
+ case 'E':
+ case 'G':
+ case 'F':
+ cnk->flags |= DP_F_UP;
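+				/* fall through */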
+ case 'a':
+ /* hex float not supported yet */
+ case 'e':
+ case 'f':
+ case 'g':
+ cnk->type = CNK_FLOAT;
+ break;
+ case 'c':
+ cnk->type = CNK_CHAR;
+ break;
+ case 's':
+ cnk->type = CNK_STRING;
+ break;
+ case 'p':
+ cnk->type = CNK_PTR;
+ cnk->flags |= DP_F_UNSIGNED;
+ break;
+ case 'n':
+ cnk->type = CNK_NUM;
+ break;
+ case '%':
+ cnk->type = CNK_PRCNT;
+ break;
+ default:
+ /* Unknown, bail out*/
+ goto done;
+ }
+ ch = *format++;
+ state = DP_S_DEFAULT;
+ break;
+ case DP_S_DONE:
+ break;
+ default:
+ /* hmm? */
+ break; /* some picky compilers need this */
+ }
+ }
+
+ /* retrieve the format arguments */
+ for (pnum = 0; pnum < max_pos; pnum++) {
+ int i;
+
+ if (clist[pnum].num == 0) {
+ /* ignoring a parameter should not be permitted
+ * all parameters must be matched at least once
+ * BUT seem some system ignore this rule ...
+ * at least my glibc based system does --SSS
+ */
+#ifdef DEBUG_SNPRINTF
+ printf("parameter at position %d not used\n", pnum+1);
+#endif
+ /* eat the parameter */
+ va_arg (args, int);
+ continue;
+ }
+ for (i = 1; i < clist[pnum].num; i++) {
+ if (clist[pnum].chunks[0]->type != clist[pnum].chunks[i]->type) {
+ /* nooo noo no!
+ * all the references to a parameter
+ * must be of the same type
+ */
+ goto done;
+ }
+ }
+ cnk = clist[pnum].chunks[0];
+ switch (cnk->type) {
+ case CNK_INT:
+ if (cnk->cflags == DP_C_SHORT)
+ cnk->value = va_arg (args, int);
+ else if (cnk->cflags == DP_C_LONG)
+ cnk->value = va_arg (args, long int);
+ else if (cnk->cflags == DP_C_LLONG)
+ cnk->value = va_arg (args, LLONG);
+ else if (cnk->cflags == DP_C_SIZET)
+ cnk->value = va_arg (args, ssize_t);
+ else
+ cnk->value = va_arg (args, int);
+
+ for (i = 1; i < clist[pnum].num; i++) {
+ clist[pnum].chunks[i]->value = cnk->value;
+ }
+ break;
+
+ case CNK_OCTAL:
+ case CNK_UINT:
+ case CNK_HEX:
+ if (cnk->cflags == DP_C_SHORT)
+ cnk->value = va_arg (args, unsigned int);
+ else if (cnk->cflags == DP_C_LONG)
+ cnk->value = (unsigned long int)va_arg (args, unsigned long int);
+ else if (cnk->cflags == DP_C_LLONG)
+ cnk->value = (LLONG)va_arg (args, unsigned LLONG);
+ else if (cnk->cflags == DP_C_SIZET)
+ cnk->value = (size_t)va_arg (args, size_t);
+ else
+ cnk->value = (unsigned int)va_arg (args, unsigned int);
+
+ for (i = 1; i < clist[pnum].num; i++) {
+ clist[pnum].chunks[i]->value = cnk->value;
+ }
+ break;
+
+ case CNK_FLOAT:
+ if (cnk->cflags == DP_C_LDOUBLE)
+ cnk->fvalue = va_arg (args, LDOUBLE);
+ else
+ cnk->fvalue = va_arg (args, double);
+
+ for (i = 1; i < clist[pnum].num; i++) {
+ clist[pnum].chunks[i]->fvalue = cnk->fvalue;
+ }
+ break;
+
+ case CNK_CHAR:
+ cnk->value = va_arg (args, int);
+
+ for (i = 1; i < clist[pnum].num; i++) {
+ clist[pnum].chunks[i]->value = cnk->value;
+ }
+ break;
+
+ case CNK_STRING:
+ cnk->strvalue = va_arg (args, char *);
+ if (!cnk->strvalue) cnk->strvalue = "(NULL)";
+
+ for (i = 1; i < clist[pnum].num; i++) {
+ clist[pnum].chunks[i]->strvalue = cnk->strvalue;
+ }
+ break;
+
+ case CNK_PTR:
+ cnk->strvalue = va_arg (args, void *);
+ for (i = 1; i < clist[pnum].num; i++) {
+ clist[pnum].chunks[i]->strvalue = cnk->strvalue;
+ }
+ break;
+
+ case CNK_NUM:
+ if (cnk->cflags == DP_C_CHAR)
+ cnk->pnum = va_arg (args, char *);
+ else if (cnk->cflags == DP_C_SHORT)
+ cnk->pnum = va_arg (args, short int *);
+ else if (cnk->cflags == DP_C_LONG)
+ cnk->pnum = va_arg (args, long int *);
+ else if (cnk->cflags == DP_C_LLONG)
+ cnk->pnum = va_arg (args, LLONG *);
+ else if (cnk->cflags == DP_C_SIZET)
+ cnk->pnum = va_arg (args, ssize_t *);
+ else
+ cnk->pnum = va_arg (args, int *);
+
+ for (i = 1; i < clist[pnum].num; i++) {
+ clist[pnum].chunks[i]->pnum = cnk->pnum;
+ }
+ break;
+
+ case CNK_PRCNT:
+ break;
+
+ default:
+ /* what ?? */
+ goto done;
+ }
+ }
+ /* print out the actual string from chunks */
+ currlen = 0;
+ cnk = chunks;
+ while (cnk) {
+ int len, min, max;
+
+ if (cnk->min_star) min = cnk->min_star->value;
+ else min = cnk->min;
+ if (cnk->max_star) max = cnk->max_star->value;
+ else max = cnk->max;
+
+ switch (cnk->type) {
+
+ case CNK_FMT_STR:
+ if (maxlen != 0 && maxlen > currlen) {
+ if (maxlen > (currlen + cnk->len)) len = cnk->len;
+ else len = maxlen - currlen;
+
+ memcpy(&(buffer[currlen]), &(base[cnk->start]), len);
+ }
+ currlen += cnk->len;
+
+ break;
+
+ case CNK_INT:
+ case CNK_UINT:
+ fmtint (buffer, &currlen, maxlen, cnk->value, 10, min, max, cnk->flags);
+ break;
+
+ case CNK_OCTAL:
+ fmtint (buffer, &currlen, maxlen, cnk->value, 8, min, max, cnk->flags);
+ break;
+
+ case CNK_HEX:
+ fmtint (buffer, &currlen, maxlen, cnk->value, 16, min, max, cnk->flags);
+ break;
+
+ case CNK_FLOAT:
+ fmtfp (buffer, &currlen, maxlen, cnk->fvalue, min, max, cnk->flags);
+ break;
+
+ case CNK_CHAR:
+ dopr_outch (buffer, &currlen, maxlen, cnk->value);
+ break;
+
+ case CNK_STRING:
+ if (max == -1) {
+ max = strlen(cnk->strvalue);
+ }
+ fmtstr (buffer, &currlen, maxlen, cnk->strvalue, cnk->flags, min, max);
+ break;
+
+ case CNK_PTR:
+ fmtint (buffer, &currlen, maxlen, (long)(cnk->strvalue), 16, min, max, cnk->flags);
+ break;
+
+ case CNK_NUM:
+ if (cnk->cflags == DP_C_CHAR)
+ *((char *)(cnk->pnum)) = (char)currlen;
+ else if (cnk->cflags == DP_C_SHORT)
+ *((short int *)(cnk->pnum)) = (short int)currlen;
+ else if (cnk->cflags == DP_C_LONG)
+ *((long int *)(cnk->pnum)) = (long int)currlen;
+ else if (cnk->cflags == DP_C_LLONG)
+ *((LLONG *)(cnk->pnum)) = (LLONG)currlen;
+ else if (cnk->cflags == DP_C_SIZET)
+ *((ssize_t *)(cnk->pnum)) = (ssize_t)currlen;
+ else
+ *((int *)(cnk->pnum)) = (int)currlen;
+ break;
+
+ case CNK_PRCNT:
+ dopr_outch (buffer, &currlen, maxlen, '%');
+ break;
+
+ default:
+ /* what ?? */
+ goto done;
+ }
+ cnk = cnk->next;
+ }
+ if (maxlen != 0) {
+ if (currlen < maxlen - 1)
+ buffer[currlen] = '\0';
+ else if (maxlen > 0)
+ buffer[maxlen - 1] = '\0';
+ }
+ ret = currlen;
+
+done:
+ va_end(args);
+
+ while (chunks) {
+ cnk = chunks->next;
+ free(chunks);
+ chunks = cnk;
+ }
+ if (clist) {
+ for (pnum = 0; pnum < max_pos; pnum++) {
+ if (clist[pnum].chunks) free(clist[pnum].chunks);
+ }
+ free(clist);
+ }
+ return ret;
+}
+
+static void fmtstr(char *buffer, size_t *currlen, size_t maxlen,
+ char *value, int flags, int min, int max)
+{
+ int padlen, strln; /* amount to pad */
+ int cnt = 0;
+
+#ifdef DEBUG_SNPRINTF
+ printf("fmtstr min=%d max=%d s=[%s]\n", min, max, value);
+#endif
+ if (value == 0) {
+ value = "<NULL>";
+ }
+
+ for (strln = 0; strln < max && value[strln]; ++strln); /* strlen */
+ padlen = min - strln;
+ if (padlen < 0)
+ padlen = 0;
+ if (flags & DP_F_MINUS)
+ padlen = -padlen; /* Left Justify */
+
+ while (padlen > 0) {
+ dopr_outch (buffer, currlen, maxlen, ' ');
+ --padlen;
+ }
+ while (*value && (cnt < max)) {
+ dopr_outch (buffer, currlen, maxlen, *value++);
+ ++cnt;
+ }
+ while (padlen < 0) {
+ dopr_outch (buffer, currlen, maxlen, ' ');
+ ++padlen;
+ }
+}
+
+/* Have to handle DP_F_NUM (ie 0x and 0 alternates) */
+
+static void fmtint(char *buffer, size_t *currlen, size_t maxlen,
+ LLONG value, int base, int min, int max, int flags)
+{
+ int signvalue = 0;
+ unsigned LLONG uvalue;
+ char convert[20];
+ int place = 0;
+ int spadlen = 0; /* amount to space pad */
+ int zpadlen = 0; /* amount to zero pad */
+ int caps = 0;
+
+ if (max < 0)
+ max = 0;
+
+ uvalue = value;
+
+ if(!(flags & DP_F_UNSIGNED)) {
+ if( value < 0 ) {
+ signvalue = '-';
+ uvalue = -value;
+ } else {
+			if (flags & DP_F_PLUS) /* Do a sign (+/-) */
+ signvalue = '+';
+ else if (flags & DP_F_SPACE)
+ signvalue = ' ';
+ }
+ }
+
+ if (flags & DP_F_UP) caps = 1; /* Should characters be upper case? */
+
+ do {
+ convert[place++] =
+ (caps? "0123456789ABCDEF":"0123456789abcdef")
+ [uvalue % (unsigned)base ];
+ uvalue = (uvalue / (unsigned)base );
+ } while(uvalue && (place < 20));
+ if (place == 20) place--;
+ convert[place] = 0;
+
+ zpadlen = max - place;
+ spadlen = min - MAX (max, place) - (signvalue ? 1 : 0);
+ if (zpadlen < 0) zpadlen = 0;
+ if (spadlen < 0) spadlen = 0;
+ if (flags & DP_F_ZERO) {
+ zpadlen = MAX(zpadlen, spadlen);
+ spadlen = 0;
+ }
+ if (flags & DP_F_MINUS)
+		spadlen = -spadlen; /* Left Justify */
+
+#ifdef DEBUG_SNPRINTF
+ printf("zpad: %d, spad: %d, min: %d, max: %d, place: %d\n",
+ zpadlen, spadlen, min, max, place);
+#endif
+
+ /* Spaces */
+ while (spadlen > 0) {
+ dopr_outch (buffer, currlen, maxlen, ' ');
+ --spadlen;
+ }
+
+ /* Sign */
+ if (signvalue)
+ dopr_outch (buffer, currlen, maxlen, signvalue);
+
+ /* Zeros */
+ if (zpadlen > 0) {
+ while (zpadlen > 0) {
+ dopr_outch (buffer, currlen, maxlen, '0');
+ --zpadlen;
+ }
+ }
+
+ /* Digits */
+ while (place > 0)
+ dopr_outch (buffer, currlen, maxlen, convert[--place]);
+
+ /* Left Justified spaces */
+ while (spadlen < 0) {
+ dopr_outch (buffer, currlen, maxlen, ' ');
+ ++spadlen;
+ }
+}
+
+static LDOUBLE abs_val(LDOUBLE value)
+{
+ LDOUBLE result = value;
+
+ if (value < 0)
+ result = -value;
+
+ return result;
+}
+
+static LDOUBLE POW10(int exp)
+{
+ LDOUBLE result = 1;
+
+ while (exp) {
+ result *= 10;
+ exp--;
+ }
+
+ return result;
+}
+
+static LLONG ROUND(LDOUBLE value)
+{
+ LLONG intpart;
+
+ intpart = (LLONG)value;
+ value = value - intpart;
+ if (value >= 0.5) intpart++;
+
+ return intpart;
+}
+
+/* a replacement for modf that doesn't need the math library. Should
+ be portable, but slow */
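+/*
+ * How it works (descriptive note): the value is scaled down by powers
+ * of ten until its integer part fits in a long; that high digit group
+ * is split off and the function recurses on the remainder, so the
+ * whole integer part is recovered without pulling in libm.
+ */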
+static double my_modf(double x0, double *iptr)
+{
+ int i;
+ LLONG l=0;
+ double x = x0;
+ double f = 1.0;
+
+ for (i=0;i<100;i++) {
+ l = (long)x;
+ if (l <= (x+1) && l >= (x-1)) break;
+ x *= 0.1;
+ f *= 10.0;
+ }
+
+ if (i == 100) {
+ /* yikes! the number is beyond what we can handle. What do we do? */
+ (*iptr) = 0;
+ return 0;
+ }
+
+ if (i != 0) {
+ double i2;
+ double ret;
+
+ ret = my_modf(x0-l*f, &i2);
+ (*iptr) = l*f + i2;
+ return ret;
+ }
+
+ (*iptr) = l;
+ return x - (*iptr);
+}
+
+
+static void fmtfp (char *buffer, size_t *currlen, size_t maxlen,
+ LDOUBLE fvalue, int min, int max, int flags)
+{
+ int signvalue = 0;
+ double ufvalue;
+ char iconvert[311];
+ char fconvert[311];
+ int iplace = 0;
+ int fplace = 0;
+ int padlen = 0; /* amount to pad */
+ int zpadlen = 0;
+ int caps = 0;
+ int idx;
+ double intpart;
+ double fracpart;
+ double temp;
+
+ /*
+ * AIX manpage says the default is 0, but Solaris says the default
+ * is 6, and sprintf on AIX defaults to 6
+ */
+ if (max < 0)
+ max = 6;
+
+ ufvalue = abs_val (fvalue);
+
+ if (fvalue < 0) {
+ signvalue = '-';
+ } else {
+		if (flags & DP_F_PLUS) { /* Do a sign (+/-) */
+ signvalue = '+';
+ } else {
+ if (flags & DP_F_SPACE)
+ signvalue = ' ';
+ }
+ }
+
+#if 0
+ if (flags & DP_F_UP) caps = 1; /* Should characters be upper case? */
+#endif
+
+#if 0
+ if (max == 0) ufvalue += 0.5; /* if max = 0 we must round */
+#endif
+
+ /*
+ * Sorry, we only support 9 digits past the decimal because of our
+ * conversion method
+ */
+ if (max > 9)
+ max = 9;
+
+ /* We "cheat" by converting the fractional part to integer by
+ * multiplying by a factor of 10
+ */
+
+ temp = ufvalue;
+ my_modf(temp, &intpart);
+
+ fracpart = ROUND((POW10(max)) * (ufvalue - intpart));
+
+ if (fracpart >= POW10(max)) {
+ intpart++;
+ fracpart -= POW10(max);
+ }
+
+
+ /* Convert integer part */
+ do {
+ temp = intpart*0.1;
+ my_modf(temp, &intpart);
+ idx = (int) ((temp -intpart +0.05)* 10.0);
+ /* idx = (int) (((double)(temp*0.1) -intpart +0.05) *10.0); */
+ /* printf ("%llf, %f, %x\n", temp, intpart, idx); */
+ iconvert[iplace++] =
+ (caps? "0123456789ABCDEF":"0123456789abcdef")[idx];
+ } while (intpart && (iplace < 311));
+ if (iplace == 311) iplace--;
+ iconvert[iplace] = 0;
+
+ /* Convert fractional part */
+ if (fracpart)
+ {
+ do {
+ temp = fracpart*0.1;
+ my_modf(temp, &fracpart);
+ idx = (int) ((temp -fracpart +0.05)* 10.0);
+ /* idx = (int) ((((temp/10) -fracpart) +0.05) *10); */
+ /* printf ("%lf, %lf, %ld\n", temp, fracpart, idx ); */
+ fconvert[fplace++] =
+ (caps? "0123456789ABCDEF":"0123456789abcdef")[idx];
+ } while(fracpart && (fplace < 311));
+ if (fplace == 311) fplace--;
+ }
+ fconvert[fplace] = 0;
+
+ /* -1 for decimal point, another -1 if we are printing a sign */
+ padlen = min - iplace - max - 1 - ((signvalue) ? 1 : 0);
+ zpadlen = max - fplace;
+ if (zpadlen < 0) zpadlen = 0;
+ if (padlen < 0)
+ padlen = 0;
+ if (flags & DP_F_MINUS)
+		padlen = -padlen; /* Left Justify */
+
+ if ((flags & DP_F_ZERO) && (padlen > 0)) {
+ if (signvalue) {
+ dopr_outch (buffer, currlen, maxlen, signvalue);
+ --padlen;
+ signvalue = 0;
+ }
+ while (padlen > 0) {
+ dopr_outch (buffer, currlen, maxlen, '0');
+ --padlen;
+ }
+ }
+ while (padlen > 0) {
+ dopr_outch (buffer, currlen, maxlen, ' ');
+ --padlen;
+ }
+ if (signvalue)
+ dopr_outch (buffer, currlen, maxlen, signvalue);
+
+ while (iplace > 0)
+ dopr_outch (buffer, currlen, maxlen, iconvert[--iplace]);
+
+#ifdef DEBUG_SNPRINTF
+ printf("fmtfp: fplace=%d zpadlen=%d\n", fplace, zpadlen);
+#endif
+
+ /*
+ * Decimal point. This should probably use locale to find the correct
+ * char to print out.
+ */
+ if (max > 0) {
+ dopr_outch (buffer, currlen, maxlen, '.');
+
+ while (zpadlen > 0) {
+ dopr_outch (buffer, currlen, maxlen, '0');
+ --zpadlen;
+ }
+
+ while (fplace > 0)
+ dopr_outch (buffer, currlen, maxlen, fconvert[--fplace]);
+ }
+
+ while (padlen < 0) {
+ dopr_outch (buffer, currlen, maxlen, ' ');
+ ++padlen;
+ }
+}
+
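+/*
+ * Descriptive note: the character is only stored while there is room,
+ * but *currlen is advanced unconditionally, so dopr() ends up with the
+ * length the output would have had - the C99 vsnprintf return value.
+ */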
+static void dopr_outch(char *buffer, size_t *currlen, size_t maxlen, char c)
+{
+ if (*currlen < maxlen) {
+ buffer[(*currlen)] = c;
+ }
+ (*currlen)++;
+}
+
+static struct pr_chunk *new_chunk(void) {
+ struct pr_chunk *new_c = (struct pr_chunk *)malloc(sizeof(struct pr_chunk));
+
+ if (!new_c)
+ return NULL;
+
+ new_c->type = 0;
+ new_c->num = 0;
+ new_c->min = 0;
+ new_c->min_star = NULL;
+ new_c->max = -1;
+ new_c->max_star = NULL;
+ new_c->flags = 0;
+ new_c->cflags = 0;
+ new_c->start = 0;
+ new_c->len = 0;
+ new_c->value = 0;
+ new_c->fvalue = 0;
+ new_c->strvalue = NULL;
+ new_c->pnum = NULL;
+ new_c->next = NULL;
+
+ return new_c;
+}
+
+static int add_cnk_list_entry(struct pr_chunk_x **list,
+ int max_num, struct pr_chunk *chunk) {
+ struct pr_chunk_x *l;
+ struct pr_chunk **c;
+ int max;
+ int cnum;
+ int i, pos;
+
+ if (chunk->num > max_num) {
+ max = chunk->num;
+
+ if (*list == NULL) {
+ l = (struct pr_chunk_x *)malloc(sizeof(struct pr_chunk_x) * max);
+ pos = 0;
+ } else {
+ l = (struct pr_chunk_x *)realloc(*list, sizeof(struct pr_chunk_x) * max);
+ pos = max_num;
+ }
+		if (l == NULL) {
+			/* on malloc failure *list may still be NULL, and
+			 * on realloc failure only max_num entries exist */
+			if (*list != NULL) {
+				for (i = 0; i < max_num; i++) {
+					if ((*list)[i].chunks) free((*list)[i].chunks);
+				}
+			}
+			return 0;
+		}
+ for (i = pos; i < max; i++) {
+ l[i].chunks = NULL;
+ l[i].num = 0;
+ }
+ } else {
+ l = *list;
+ max = max_num;
+ }
+
+ i = chunk->num - 1;
+ cnum = l[i].num + 1;
+ if (l[i].chunks == NULL) {
+ c = (struct pr_chunk **)malloc(sizeof(struct pr_chunk *) * cnum);
+ } else {
+ c = (struct pr_chunk **)realloc(l[i].chunks, sizeof(struct pr_chunk *) * cnum);
+ }
+ if (c == NULL) {
+ for (i = 0; i < max; i++) {
+ if (l[i].chunks) free(l[i].chunks);
+ }
+ return 0;
+ }
+ c[l[i].num] = chunk;
+ l[i].chunks = c;
+ l[i].num = cnum;
+
+ *list = l;
+ return max;
+}
+
+ int rep_vsnprintf (char *str, size_t count, const char *fmt, va_list args)
+{
+ return dopr(str, count, fmt, args);
+}
+#endif
+
+/* yes this really must be a ||. Don't muck with this (tridge)
+ *
+ * The logic for these two is that we need our own definition if the
+ * OS *either* has no definition of *sprintf, or if it does have one
+ * that doesn't work properly according to the autoconf test.
+ */
+#if !defined(HAVE_SNPRINTF) || !defined(HAVE_C99_VSNPRINTF)
+ int rep_snprintf(char *str,size_t count,const char *fmt,...)
+{
+ size_t ret;
+ va_list ap;
+
+ va_start(ap, fmt);
+ ret = vsnprintf(str, count, fmt, ap);
+ va_end(ap);
+ return ret;
+}
+#endif
+
+#ifndef HAVE_C99_VSNPRINTF
+ int rep_printf(const char *fmt, ...)
+{
+ va_list ap;
+ int ret;
+ char *s;
+
+ s = NULL;
+ va_start(ap, fmt);
+ ret = vasprintf(&s, fmt, ap);
+ va_end(ap);
+
+ if (s) {
+ fwrite(s, 1, strlen(s), stdout);
+ }
+ free(s);
+
+ return ret;
+}
+#endif
+
+#ifndef HAVE_C99_VSNPRINTF
+ int rep_fprintf(FILE *stream, const char *fmt, ...)
+{
+ va_list ap;
+ int ret;
+ char *s;
+
+ s = NULL;
+ va_start(ap, fmt);
+ ret = vasprintf(&s, fmt, ap);
+ va_end(ap);
+
+ if (s) {
+ fwrite(s, 1, strlen(s), stream);
+ }
+ free(s);
+
+ return ret;
+}
+#endif
+
+#endif
+
+#if !defined(HAVE_VASPRINTF) || !defined(HAVE_C99_VSNPRINTF)
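+/*
+ * Descriptive note: this relies on C99 vsnprintf(NULL, 0, ...)
+ * returning the required length, so the string is formatted twice:
+ * once to size the buffer and once to fill it.
+ */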
+ int rep_vasprintf(char **ptr, const char *format, va_list ap)
+{
+ int ret;
+ va_list ap2;
+
+ VA_COPY(ap2, ap);
+ ret = vsnprintf(NULL, 0, format, ap2);
+ va_end(ap2);
+ if (ret < 0) return ret;
+
+ (*ptr) = (char *)malloc(ret+1);
+ if (!*ptr) return -1;
+
+ VA_COPY(ap2, ap);
+ ret = vsnprintf(*ptr, ret+1, format, ap2);
+ va_end(ap2);
+
+ return ret;
+}
+#endif
+
+#if !defined(HAVE_ASPRINTF) || !defined(HAVE_C99_VSNPRINTF)
+ int rep_asprintf(char **ptr, const char *format, ...)
+{
+ va_list ap;
+ int ret;
+
+ *ptr = NULL;
+ va_start(ap, format);
+ ret = vasprintf(ptr, format, ap);
+ va_end(ap);
+
+ return ret;
+}
+#endif
+
+#ifdef TEST_SNPRINTF
+
+ int sprintf(char *str,const char *fmt,...);
+ int printf(const char *fmt,...);
+
+ int main (void)
+{
+ char buf1[1024];
+ char buf2[1024];
+ char *buf3;
+ char *fp_fmt[] = {
+ "%1.1f",
+ "%-1.5f",
+ "%1.5f",
+ "%123.9f",
+ "%10.5f",
+ "% 10.5f",
+ "%+22.9f",
+ "%+4.9f",
+ "%01.3f",
+ "%4f",
+ "%3.1f",
+ "%3.2f",
+ "%.0f",
+ "%f",
+ "%-8.8f",
+ "%-9.9f",
+ NULL
+ };
+ double fp_nums[] = { 6442452944.1234, -1.5, 134.21, 91340.2, 341.1234, 203.9, 0.96, 0.996,
+ 0.9996, 1.996, 4.136, 5.030201, 0.00205,
+ /* END LIST */ 0};
+ char *int_fmt[] = {
+ "%-1.5d",
+ "%1.5d",
+ "%123.9d",
+ "%5.5d",
+ "%10.5d",
+ "% 10.5d",
+ "%+22.33d",
+ "%01.3d",
+ "%4d",
+ "%d",
+ NULL
+ };
+ long int_nums[] = { -1, 134, 91340, 341, 0203, 1234567890, 0};
+ char *str_fmt[] = {
+ "%10.5s",
+ "%-10.5s",
+ "%5.10s",
+ "%-5.10s",
+ "%10.1s",
+ "%0.10s",
+ "%10.0s",
+ "%1.10s",
+ "%s",
+ "%.1s",
+ "%.10s",
+ "%10s",
+ NULL
+ };
+ char *str_vals[] = {"hello", "a", "", "a longer string", NULL};
+#ifdef HAVE_LONG_LONG
+ char *ll_fmt[] = {
+ "%llu",
+ NULL
+ };
+ LLONG ll_nums[] = { 134, 91340, 341, 0203, 1234567890, 128006186140000000LL, 0};
+#endif
+ int x, y;
+ int fail = 0;
+ int num = 0;
+ int l1, l2;
+ char *ss_fmt[] = {
+ "%zd",
+ "%zu",
+ NULL
+ };
+ size_t ss_nums[] = {134, 91340, 123456789, 0203, 1234567890, 0};
+
+ printf ("Testing snprintf format codes against system sprintf...\n");
+
+ for (x = 0; fp_fmt[x] ; x++) {
+ for (y = 0; fp_nums[y] != 0 ; y++) {
+ buf1[0] = buf2[0] = '\0';
+ l1 = snprintf(buf1, sizeof(buf1), fp_fmt[x], fp_nums[y]);
+ l2 = sprintf (buf2, fp_fmt[x], fp_nums[y]);
+ buf1[1023] = buf2[1023] = '\0';
+ if (strcmp (buf1, buf2) || (l1 != l2)) {
+ printf("snprintf doesn't match Format: %s\n\tsnprintf(%d) = [%s]\n\t sprintf(%d) = [%s]\n",
+ fp_fmt[x], l1, buf1, l2, buf2);
+ fail++;
+ }
+ num++;
+ }
+ }
+
+ for (x = 0; int_fmt[x] ; x++) {
+ for (y = 0; int_nums[y] != 0 ; y++) {
+ buf1[0] = buf2[0] = '\0';
+ l1 = snprintf(buf1, sizeof(buf1), int_fmt[x], int_nums[y]);
+ l2 = sprintf (buf2, int_fmt[x], int_nums[y]);
+ buf1[1023] = buf2[1023] = '\0';
+ if (strcmp (buf1, buf2) || (l1 != l2)) {
+ printf("snprintf doesn't match Format: %s\n\tsnprintf(%d) = [%s]\n\t sprintf(%d) = [%s]\n",
+ int_fmt[x], l1, buf1, l2, buf2);
+ fail++;
+ }
+ num++;
+ }
+ }
+
+ for (x = 0; str_fmt[x] ; x++) {
+ for (y = 0; str_vals[y] != 0 ; y++) {
+ buf1[0] = buf2[0] = '\0';
+ l1 = snprintf(buf1, sizeof(buf1), str_fmt[x], str_vals[y]);
+ l2 = sprintf (buf2, str_fmt[x], str_vals[y]);
+ buf1[1023] = buf2[1023] = '\0';
+ if (strcmp (buf1, buf2) || (l1 != l2)) {
+ printf("snprintf doesn't match Format: %s\n\tsnprintf(%d) = [%s]\n\t sprintf(%d) = [%s]\n",
+ str_fmt[x], l1, buf1, l2, buf2);
+ fail++;
+ }
+ num++;
+ }
+ }
+
+#ifdef HAVE_LONG_LONG
+ for (x = 0; ll_fmt[x] ; x++) {
+ for (y = 0; ll_nums[y] != 0 ; y++) {
+ buf1[0] = buf2[0] = '\0';
+ l1 = snprintf(buf1, sizeof(buf1), ll_fmt[x], ll_nums[y]);
+ l2 = sprintf (buf2, ll_fmt[x], ll_nums[y]);
+ buf1[1023] = buf2[1023] = '\0';
+ if (strcmp (buf1, buf2) || (l1 != l2)) {
+ printf("snprintf doesn't match Format: %s\n\tsnprintf(%d) = [%s]\n\t sprintf(%d) = [%s]\n",
+ ll_fmt[x], l1, buf1, l2, buf2);
+ fail++;
+ }
+ num++;
+ }
+ }
+#endif
+
+#define BUFSZ 2048
+
+ buf1[0] = buf2[0] = '\0';
+ if ((buf3 = malloc(BUFSZ)) == NULL) {
+ fail++;
+ } else {
+ num++;
+ memset(buf3, 'a', BUFSZ);
+ snprintf(buf1, sizeof(buf1), "%.*s", 1, buf3);
+ buf1[1023] = '\0';
+ if (strcmp(buf1, "a") != 0) {
+ printf("length limit buf1 '%s' expected 'a'\n", buf1);
+ fail++;
+ }
+ }
+
+ buf1[0] = buf2[0] = '\0';
+ l1 = snprintf(buf1, sizeof(buf1), "%4$*1$d %2$s %3$*1$.*1$f", 3, "pos test", 12.3456, 9);
+ l2 = sprintf(buf2, "%4$*1$d %2$s %3$*1$.*1$f", 3, "pos test", 12.3456, 9);
+ buf1[1023] = buf2[1023] = '\0';
+ if (strcmp(buf1, buf2) || (l1 != l2)) {
+ printf("snprintf doesn't match Format: %s\n\tsnprintf(%d) = [%s]\n\t sprintf(%d) = [%s]\n",
+ "%4$*1$d %2$s %3$*1$.*1$f", l1, buf1, l2, buf2);
+ fail++;
+ }
+
+ buf1[0] = buf2[0] = '\0';
+ l1 = snprintf(buf1, sizeof(buf1), "%4$*4$d %2$s %3$*4$.*4$f", 3, "pos test", 12.3456, 9);
+ l2 = sprintf(buf2, "%4$*4$d %2$s %3$*4$.*4$f", 3, "pos test", 12.3456, 9);
+ buf1[1023] = buf2[1023] = '\0';
+ if (strcmp(buf1, buf2)) {
+ printf("snprintf doesn't match Format: %s\n\tsnprintf(%d) = [%s]\n\t sprintf(%d) = [%s]\n",
+ "%4$*1$d %2$s %3$*1$.*1$f", l1, buf1, l2, buf2);
+ fail++;
+ }
+
+ for (x = 0; ss_fmt[x] ; x++) {
+ for (y = 0; ss_nums[y] != 0 ; y++) {
+ buf1[0] = buf2[0] = '\0';
+ l1 = snprintf(buf1, sizeof(buf1), ss_fmt[x], ss_nums[y]);
+ l2 = sprintf (buf2, ss_fmt[x], ss_nums[y]);
+ buf1[1023] = buf2[1023] = '\0';
+ if (strcmp (buf1, buf2) || (l1 != l2)) {
+ printf("snprintf doesn't match Format: %s\n\tsnprintf(%d) = [%s]\n\t sprintf(%d) = [%s]\n",
+ ss_fmt[x], l1, buf1, l2, buf2);
+ fail++;
+ }
+ num++;
+ }
+ }
+#if 0
+ buf1[0] = buf2[0] = '\0';
+ l1 = snprintf(buf1, sizeof(buf1), "%lld", (LLONG)1234567890);
+ l2 = sprintf(buf2, "%lld", (LLONG)1234567890);
+ buf1[1023] = buf2[1023] = '\0';
+ if (strcmp(buf1, buf2)) {
+ printf("snprintf doesn't match Format: %s\n\tsnprintf(%d) = [%s]\n\t sprintf(%d) = [%s]\n",
+ "%lld", l1, buf1, l2, buf2);
+ fail++;
+ }
+
+ buf1[0] = buf2[0] = '\0';
+ l1 = snprintf(buf1, sizeof(buf1), "%Lf", (LDOUBLE)890.1234567890123);
+ l2 = sprintf(buf2, "%Lf", (LDOUBLE)890.1234567890123);
+ buf1[1023] = buf2[1023] = '\0';
+ if (strcmp(buf1, buf2)) {
+ printf("snprintf doesn't match Format: %s\n\tsnprintf(%d) = [%s]\n\t sprintf(%d) = [%s]\n",
+ "%Lf", l1, buf1, l2, buf2);
+ fail++;
+ }
+#endif
+ printf ("%d tests failed out of %d.\n", fail, num);
+
+ printf("seeing how many digits we support\n");
+ {
+ double v0 = 0.12345678901234567890123456789012345678901;
+ for (x=0; x<100; x++) {
+ double p = pow(10, x);
+ double r = v0*p;
+ snprintf(buf1, sizeof(buf1), "%1.1f", r);
+ sprintf(buf2, "%1.1f", r);
+ if (strcmp(buf1, buf2)) {
+ printf("we seem to support %d digits\n", x-1);
+ break;
+ }
+ }
+ }
+
+ return 0;
+}
+#endif /* TEST_SNPRINTF */
diff --git a/lib/replace/socket.c b/lib/replace/socket.c
new file mode 100644
index 0000000..4cd9d2e
--- /dev/null
+++ b/lib/replace/socket.c
@@ -0,0 +1,39 @@
+/*
+ * Unix SMB/CIFS implementation.
+ *
+ * Dummy replacements for socket functions.
+ *
+ * Copyright (C) Michael Adam <obnox@samba.org> 2008
+ *
+ * ** NOTE! The following LGPL license applies to the replace
+ * ** library. This does NOT imply that all of Samba is released
+ * ** under the LGPL
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 3 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Library General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include "replace.h"
+#include "system/network.h"
+
+int rep_connect(int sockfd, const struct sockaddr *serv_addr, socklen_t addrlen)
+{
+ errno = ENOSYS;
+ return -1;
+}
+
+struct hostent *rep_gethostbyname(const char *name)
+{
+ errno = ENOSYS;
+ return NULL;
+}
diff --git a/lib/replace/socketpair.c b/lib/replace/socketpair.c
new file mode 100644
index 0000000..c775730
--- /dev/null
+++ b/lib/replace/socketpair.c
@@ -0,0 +1,46 @@
+/*
+ * Unix SMB/CIFS implementation.
+ * replacement routines for broken systems
+ * Copyright (C) Jelmer Vernooij <jelmer@samba.org> 2006
+ * Copyright (C) Michael Adam <obnox@samba.org> 2008
+ *
+ * ** NOTE! The following LGPL license applies to the replace
+ * ** library. This does NOT imply that all of Samba is released
+ * ** under the LGPL
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 3 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include "replace.h"
+#include "system/network.h"
+
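+/*
+ * Descriptive note: this emulates an AF_UNIX stream socketpair with
+ * pipe().  On most systems a pipe is unidirectional, so unlike a real
+ * socketpair only sv[0] is readable and only sv[1] is writable.
+ */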
+int rep_socketpair(int d, int type, int protocol, int sv[2])
+{
+ if (d != AF_UNIX) {
+ errno = EAFNOSUPPORT;
+ return -1;
+ }
+
+ if (protocol != 0) {
+ errno = EPROTONOSUPPORT;
+ return -1;
+ }
+
+ if (type != SOCK_STREAM) {
+ errno = EOPNOTSUPP;
+ return -1;
+ }
+
+ return pipe(sv);
+}
diff --git a/lib/replace/strptime.c b/lib/replace/strptime.c
new file mode 100644
index 0000000..20e5d8c
--- /dev/null
+++ b/lib/replace/strptime.c
@@ -0,0 +1,993 @@
+/* Convert a string representation of time to a time value.
+ Copyright (C) 1996, 1997, 1998, 1999, 2000 Free Software Foundation, Inc.
+ This file is part of the GNU C Library.
+ Contributed by Ulrich Drepper <drepper@cygnus.com>, 1996.
+
+ The GNU C Library is free software; you can redistribute it and/or
+ modify it under the terms of the GNU Lesser General Public License as
+ published by the Free Software Foundation; either version 3 of the
+ License, or (at your option) any later version.
+
+ The GNU C Library is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ Library General Public License for more details.
+
+ You should have received a copy of the GNU Lesser General Public
+ License along with the GNU C Library; see the file COPYING.LIB. If not,
+ see <http://www.gnu.org/licenses/>. */
+
+/* XXX This version of the implementation is not really complete.
+   Some of the fields cannot add information on their own.  But if
+   several of them appear together in the input (such as year, week
+   and weekday), that is enough information to determine the date.  */
+
+#include "replace.h"
+#include "system/locale.h"
+#include "system/time.h"
+
+#ifndef __P
+# if defined (__GNUC__) || (defined (__STDC__) && __STDC__)
+# define __P(args) args
+# else
+# define __P(args) ()
+# endif /* GCC. */
+#endif /* Not __P. */
+
+#if ! HAVE_LOCALTIME_R && ! defined localtime_r
+# ifdef _LIBC
+# define localtime_r __localtime_r
+# else
+/* Approximate localtime_r as best we can in its absence. */
+# define localtime_r my_localtime_r
+static struct tm *localtime_r __P ((const time_t *, struct tm *));
+static struct tm *
+localtime_r (t, tp)
+ const time_t *t;
+ struct tm *tp;
+{
+ struct tm *l = localtime (t);
+ if (! l)
+ return 0;
+ *tp = *l;
+ return tp;
+}
+# endif /* ! _LIBC */
+#endif /* ! HAVE_LOCALTIME_R && ! defined (localtime_r) */
+
+
+#define match_char(ch1, ch2) if (ch1 != ch2) return NULL
+#if defined __GNUC__ && __GNUC__ >= 2
+# define match_string(cs1, s2) \
+ ({ size_t len = strlen (cs1); \
+ int result = strncasecmp ((cs1), (s2), len) == 0; \
+ if (result) (s2) += len; \
+ result; })
+#else
+/* Oh come on. Get a reasonable compiler. */
+# define match_string(cs1, s2) \
+ (strncasecmp ((cs1), (s2), strlen (cs1)) ? 0 : ((s2) += strlen (cs1), 1))
+#endif
+/* We intentionally do not use isdigit() for testing because this will
+ lead to problems with the wide character version. */
+#define get_number(from, to, n) \
+ do { \
+ int __n = n; \
+ val = 0; \
+ while (*rp == ' ') \
+ ++rp; \
+ if (*rp < '0' || *rp > '9') \
+ return NULL; \
+ do { \
+ val *= 10; \
+ val += *rp++ - '0'; \
+ } while (--__n > 0 && val * 10 <= to && *rp >= '0' && *rp <= '9'); \
+ if (val < from || val > to) \
+ return NULL; \
+ } while (0)
+#ifdef _NL_CURRENT
+# define get_alt_number(from, to, n) \
+ ({ \
+ __label__ do_normal; \
+ if (*decided != raw) \
+ { \
+ const char *alts = _NL_CURRENT (LC_TIME, ALT_DIGITS); \
+ int __n = n; \
+ int any = 0; \
+ while (*rp == ' ') \
+ ++rp; \
+ val = 0; \
+ do { \
+ val *= 10; \
+ while (*alts != '\0') \
+ { \
+ size_t len = strlen (alts); \
+ if (strncasecmp (alts, rp, len) == 0) \
+ break; \
+ alts += len + 1; \
+ ++val; \
+ } \
+ if (*alts == '\0') \
+ { \
+ if (*decided == not && ! any) \
+ goto do_normal; \
+ /* If we haven't read anything it's an error. */ \
+ if (! any) \
+ return NULL; \
+ /* Correct the premature multiplication. */ \
+ val /= 10; \
+ break; \
+ } \
+ else \
+ *decided = loc; \
+ } while (--__n > 0 && val * 10 <= to); \
+ if (val < from || val > to) \
+ return NULL; \
+ } \
+ else \
+ { \
+ do_normal: \
+ get_number (from, to, n); \
+ } \
+ 0; \
+ })
+#else
+# define get_alt_number(from, to, n) \
+ /* We don't have the alternate representation. */ \
+ get_number(from, to, n)
+#endif
+#define recursive(new_fmt) \
+ (*(new_fmt) != '\0' \
+ && (rp = strptime_internal (rp, (new_fmt), tm, decided, era_cnt)) != NULL)
+
+
+#ifdef _LIBC
+/* This is defined in locale/C-time.c in the GNU libc. */
+extern const struct locale_data _nl_C_LC_TIME;
+extern const unsigned short int __mon_yday[2][13];
+
+# define weekday_name (&_nl_C_LC_TIME.values[_NL_ITEM_INDEX (DAY_1)].string)
+# define ab_weekday_name \
+ (&_nl_C_LC_TIME.values[_NL_ITEM_INDEX (ABDAY_1)].string)
+# define month_name (&_nl_C_LC_TIME.values[_NL_ITEM_INDEX (MON_1)].string)
+# define ab_month_name (&_nl_C_LC_TIME.values[_NL_ITEM_INDEX (ABMON_1)].string)
+# define HERE_D_T_FMT (_nl_C_LC_TIME.values[_NL_ITEM_INDEX (D_T_FMT)].string)
+# define HERE_D_FMT (_nl_C_LC_TIME.values[_NL_ITEM_INDEX (D_FMT)].string)
+# define HERE_AM_STR (_nl_C_LC_TIME.values[_NL_ITEM_INDEX (AM_STR)].string)
+# define HERE_PM_STR (_nl_C_LC_TIME.values[_NL_ITEM_INDEX (PM_STR)].string)
+# define HERE_T_FMT_AMPM \
+ (_nl_C_LC_TIME.values[_NL_ITEM_INDEX (T_FMT_AMPM)].string)
+# define HERE_T_FMT (_nl_C_LC_TIME.values[_NL_ITEM_INDEX (T_FMT)].string)
+
+# define strncasecmp(s1, s2, n) __strncasecmp (s1, s2, n)
+#else
+static char const weekday_name[][10] =
+ {
+ "Sunday", "Monday", "Tuesday", "Wednesday",
+ "Thursday", "Friday", "Saturday"
+ };
+static char const ab_weekday_name[][4] =
+ {
+ "Sun", "Mon", "Tue", "Wed", "Thu", "Fri", "Sat"
+ };
+static char const month_name[][10] =
+ {
+ "January", "February", "March", "April", "May", "June",
+ "July", "August", "September", "October", "November", "December"
+ };
+static char const ab_month_name[][4] =
+ {
+ "Jan", "Feb", "Mar", "Apr", "May", "Jun",
+ "Jul", "Aug", "Sep", "Oct", "Nov", "Dec"
+ };
+# define HERE_D_T_FMT "%a %b %e %H:%M:%S %Y"
+# define HERE_D_FMT "%m/%d/%y"
+# define HERE_AM_STR "AM"
+# define HERE_PM_STR "PM"
+# define HERE_T_FMT_AMPM "%I:%M:%S %p"
+# define HERE_T_FMT "%H:%M:%S"
+
+static const unsigned short int __mon_yday[2][13] =
+ {
+ /* Normal years. */
+ { 0, 31, 59, 90, 120, 151, 181, 212, 243, 273, 304, 334, 365 },
+ /* Leap years. */
+ { 0, 31, 60, 91, 121, 152, 182, 213, 244, 274, 305, 335, 366 }
+ };
+#endif
+
+/* Status of lookup: do we use the locale data or the raw data? */
+enum locale_status { not, loc, raw };
+
+
+#ifndef __isleap
+/* Nonzero if YEAR is a leap year (every 4 years,
+ except every 100th isn't, and every 400th is). */
+# define __isleap(year) \
+ ((year) % 4 == 0 && ((year) % 100 != 0 || (year) % 400 == 0))
+#endif
+
+/* Compute the day of the week. */
+static void
+day_of_the_week (struct tm *tm)
+{
+  /* We know that January 1st 1970 was a Thursday (= 4).  Compute the
+     difference between that date and the one in TM to determine
+     the weekday.  */
+ int corr_year = 1900 + tm->tm_year - (tm->tm_mon < 2);
+ int wday = (-473
+ + (365 * (tm->tm_year - 70))
+ + (corr_year / 4)
+ - ((corr_year / 4) / 25) + ((corr_year / 4) % 25 < 0)
+ + (((corr_year / 4) / 25) / 4)
+ + __mon_yday[0][tm->tm_mon]
+ + tm->tm_mday - 1);
+ tm->tm_wday = ((wday % 7) + 7) % 7;
+}
+
+/* Compute the day of the year. */
+static void
+day_of_the_year (struct tm *tm)
+{
+ tm->tm_yday = (__mon_yday[__isleap (1900 + tm->tm_year)][tm->tm_mon]
+ + (tm->tm_mday - 1));
+}
+
+static char *
+#ifdef _LIBC
+internal_function
+#endif
+strptime_internal __P ((const char *rp, const char *fmt, struct tm *tm,
+ enum locale_status *decided, int era_cnt));
+
+static char *
+#ifdef _LIBC
+internal_function
+#endif
+strptime_internal (rp, fmt, tm, decided, era_cnt)
+ const char *rp;
+ const char *fmt;
+ struct tm *tm;
+ enum locale_status *decided;
+ int era_cnt;
+{
+ int cnt;
+ size_t val;
+ int have_I, is_pm;
+ int century, want_century;
+ int want_era;
+ int have_wday, want_xday;
+ int have_yday;
+ int have_mon, have_mday;
+#ifdef _NL_CURRENT
+ const char *rp_backup;
+ size_t num_eras;
+ struct era_entry *era;
+
+ era = NULL;
+#endif
+
+ have_I = is_pm = 0;
+ century = -1;
+ want_century = 0;
+ want_era = 0;
+
+ have_wday = want_xday = have_yday = have_mon = have_mday = 0;
+
+ while (*fmt != '\0')
+ {
+      /* A white space character in the format string matches zero or
+	 more white space characters in the input string.  */
+ if (isspace (*fmt))
+ {
+ while (isspace (*rp))
+ ++rp;
+ ++fmt;
+ continue;
+ }
+
+ /* Any character but `%' must be matched by the same character
+	 in the input string.  */
+ if (*fmt != '%')
+ {
+ match_char (*fmt++, *rp++);
+ continue;
+ }
+
+ ++fmt;
+#ifndef _NL_CURRENT
+ /* We need this for handling the `E' modifier. */
+ start_over:
+#endif
+
+#ifdef _NL_CURRENT
+ /* Make back up of current processing pointer. */
+ rp_backup = rp;
+#endif
+
+ switch (*fmt++)
+ {
+ case '%':
+ /* Match the `%' character itself. */
+ match_char ('%', *rp++);
+ break;
+ case 'a':
+ case 'A':
+ /* Match day of week. */
+ for (cnt = 0; cnt < 7; ++cnt)
+ {
+#ifdef _NL_CURRENT
+ if (*decided !=raw)
+ {
+ if (match_string (_NL_CURRENT (LC_TIME, DAY_1 + cnt), rp))
+ {
+ if (*decided == not
+ && strcmp (_NL_CURRENT (LC_TIME, DAY_1 + cnt),
+ weekday_name[cnt]))
+ *decided = loc;
+ break;
+ }
+ if (match_string (_NL_CURRENT (LC_TIME, ABDAY_1 + cnt), rp))
+ {
+ if (*decided == not
+ && strcmp (_NL_CURRENT (LC_TIME, ABDAY_1 + cnt),
+ ab_weekday_name[cnt]))
+ *decided = loc;
+ break;
+ }
+ }
+#endif
+ if (*decided != loc
+ && (match_string (weekday_name[cnt], rp)
+ || match_string (ab_weekday_name[cnt], rp)))
+ {
+ *decided = raw;
+ break;
+ }
+ }
+ if (cnt == 7)
+ /* Does not match a weekday name. */
+ return NULL;
+ tm->tm_wday = cnt;
+ have_wday = 1;
+ break;
+ case 'b':
+ case 'B':
+ case 'h':
+ /* Match month name. */
+ for (cnt = 0; cnt < 12; ++cnt)
+ {
+#ifdef _NL_CURRENT
+ if (*decided !=raw)
+ {
+ if (match_string (_NL_CURRENT (LC_TIME, MON_1 + cnt), rp))
+ {
+ if (*decided == not
+ && strcmp (_NL_CURRENT (LC_TIME, MON_1 + cnt),
+ month_name[cnt]))
+ *decided = loc;
+ break;
+ }
+ if (match_string (_NL_CURRENT (LC_TIME, ABMON_1 + cnt), rp))
+ {
+ if (*decided == not
+ && strcmp (_NL_CURRENT (LC_TIME, ABMON_1 + cnt),
+ ab_month_name[cnt]))
+ *decided = loc;
+ break;
+ }
+ }
+#endif
+ if (match_string (month_name[cnt], rp)
+ || match_string (ab_month_name[cnt], rp))
+ {
+ *decided = raw;
+ break;
+ }
+ }
+ if (cnt == 12)
+ /* Does not match a month name. */
+ return NULL;
+ tm->tm_mon = cnt;
+ want_xday = 1;
+ break;
+ case 'c':
+ /* Match locale's date and time format. */
+#ifdef _NL_CURRENT
+ if (*decided != raw)
+ {
+ if (!recursive (_NL_CURRENT (LC_TIME, D_T_FMT)))
+ {
+ if (*decided == loc)
+ return NULL;
+ else
+ rp = rp_backup;
+ }
+ else
+ {
+ if (*decided == not &&
+ strcmp (_NL_CURRENT (LC_TIME, D_T_FMT), HERE_D_T_FMT))
+ *decided = loc;
+ want_xday = 1;
+ break;
+ }
+ *decided = raw;
+ }
+#endif
+ if (!recursive (HERE_D_T_FMT))
+ return NULL;
+ want_xday = 1;
+ break;
+ case 'C':
+ /* Match century number. */
+#ifdef _NL_CURRENT
+ match_century:
+#endif
+ get_number (0, 99, 2);
+ century = val;
+ want_xday = 1;
+ break;
+ case 'd':
+ case 'e':
+ /* Match day of month. */
+ get_number (1, 31, 2);
+ tm->tm_mday = val;
+ have_mday = 1;
+ want_xday = 1;
+ break;
+ case 'F':
+ if (!recursive ("%Y-%m-%d"))
+ return NULL;
+ want_xday = 1;
+ break;
+ case 'x':
+#ifdef _NL_CURRENT
+ if (*decided != raw)
+ {
+ if (!recursive (_NL_CURRENT (LC_TIME, D_FMT)))
+ {
+ if (*decided == loc)
+ return NULL;
+ else
+ rp = rp_backup;
+ }
+ else
+ {
+ if (*decided == not
+ && strcmp (_NL_CURRENT (LC_TIME, D_FMT), HERE_D_FMT))
+ *decided = loc;
+ want_xday = 1;
+ break;
+ }
+ *decided = raw;
+ }
+#endif
+ /* Fall through. */
+ case 'D':
+ /* Match standard day format. */
+ if (!recursive (HERE_D_FMT))
+ return NULL;
+ want_xday = 1;
+ break;
+ case 'k':
+ case 'H':
+ /* Match hour in 24-hour clock. */
+ get_number (0, 23, 2);
+ tm->tm_hour = val;
+ have_I = 0;
+ break;
+ case 'I':
+ /* Match hour in 12-hour clock. */
+ get_number (1, 12, 2);
+ tm->tm_hour = val % 12;
+ have_I = 1;
+ break;
+ case 'j':
+ /* Match day number of year. */
+ get_number (1, 366, 3);
+ tm->tm_yday = val - 1;
+ have_yday = 1;
+ break;
+ case 'm':
+ /* Match number of month. */
+ get_number (1, 12, 2);
+ tm->tm_mon = val - 1;
+ have_mon = 1;
+ want_xday = 1;
+ break;
+ case 'M':
+ /* Match minute. */
+ get_number (0, 59, 2);
+ tm->tm_min = val;
+ break;
+ case 'n':
+ case 't':
+ /* Match any white space. */
+ while (isspace (*rp))
+ ++rp;
+ break;
+ case 'p':
+ /* Match locale's equivalent of AM/PM. */
+#ifdef _NL_CURRENT
+ if (*decided != raw)
+ {
+ if (match_string (_NL_CURRENT (LC_TIME, AM_STR), rp))
+ {
+ if (strcmp (_NL_CURRENT (LC_TIME, AM_STR), HERE_AM_STR))
+ *decided = loc;
+ break;
+ }
+ if (match_string (_NL_CURRENT (LC_TIME, PM_STR), rp))
+ {
+ if (strcmp (_NL_CURRENT (LC_TIME, PM_STR), HERE_PM_STR))
+ *decided = loc;
+ is_pm = 1;
+ break;
+ }
+ *decided = raw;
+ }
+#endif
+ if (!match_string (HERE_AM_STR, rp)) {
+ if (match_string (HERE_PM_STR, rp)) {
+ is_pm = 1;
+ } else {
+ return NULL;
+ }
+ }
+ break;
+ case 'r':
+#ifdef _NL_CURRENT
+ if (*decided != raw)
+ {
+ if (!recursive (_NL_CURRENT (LC_TIME, T_FMT_AMPM)))
+ {
+ if (*decided == loc)
+ return NULL;
+ else
+ rp = rp_backup;
+ }
+ else
+ {
+ if (*decided == not &&
+ strcmp (_NL_CURRENT (LC_TIME, T_FMT_AMPM),
+ HERE_T_FMT_AMPM))
+ *decided = loc;
+ break;
+ }
+ *decided = raw;
+ }
+#endif
+ if (!recursive (HERE_T_FMT_AMPM))
+ return NULL;
+ break;
+ case 'R':
+ if (!recursive ("%H:%M"))
+ return NULL;
+ break;
+ case 's':
+ {
+ /* The number of seconds may be very high so we cannot use
+ the `get_number' macro. Instead read the number
+ character by character and construct the result while
+ doing so. */
+ time_t secs = 0;
+ if (*rp < '0' || *rp > '9')
+ /* We need at least one digit. */
+ return NULL;
+
+ do
+ {
+ secs *= 10;
+ secs += *rp++ - '0';
+ }
+ while (*rp >= '0' && *rp <= '9');
+
+ if (localtime_r (&secs, tm) == NULL)
+ /* Error in function. */
+ return NULL;
+ }
+ break;
+ case 'S':
+ get_number (0, 61, 2);
+ tm->tm_sec = val;
+ break;
+ case 'X':
+#ifdef _NL_CURRENT
+ if (*decided != raw)
+ {
+ if (!recursive (_NL_CURRENT (LC_TIME, T_FMT)))
+ {
+ if (*decided == loc)
+ return NULL;
+ else
+ rp = rp_backup;
+ }
+ else
+ {
+ if (strcmp (_NL_CURRENT (LC_TIME, T_FMT), HERE_T_FMT))
+ *decided = loc;
+ break;
+ }
+ *decided = raw;
+ }
+#endif
+ /* Fall through. */
+ case 'T':
+ if (!recursive (HERE_T_FMT))
+ return NULL;
+ break;
+ case 'u':
+ get_number (1, 7, 1);
+ tm->tm_wday = val % 7;
+ have_wday = 1;
+ break;
+ case 'g':
+ get_number (0, 99, 2);
+ /* XXX This cannot determine any field in TM. */
+ break;
+ case 'G':
+ if (*rp < '0' || *rp > '9')
+ return NULL;
+ /* XXX Ignore the number since we would need some more
+ information to compute a real date. */
+ do
+ ++rp;
+ while (*rp >= '0' && *rp <= '9');
+ break;
+ case 'U':
+ case 'V':
+ case 'W':
+ get_number (0, 53, 2);
+ /* XXX This cannot determine any field in TM without
+ further information. */
+ break;
+ case 'w':
+ /* Match number of weekday. */
+ get_number (0, 6, 1);
+ tm->tm_wday = val;
+ have_wday = 1;
+ break;
+ case 'y':
+#ifdef _NL_CURRENT
+ match_year_in_century:
+#endif
+ /* Match year within century. */
+ get_number (0, 99, 2);
+ /* The "Year 2000: The Millennium Rollover" paper suggests that
+ values in the range 69-99 refer to the twentieth century. */
+ tm->tm_year = val >= 69 ? val : val + 100;
+ /* Indicate that we want to use the century, if specified. */
+ want_century = 1;
+ want_xday = 1;
+ break;
+ case 'Y':
+ /* Match year including century number. */
+ get_number (0, 9999, 4);
+ tm->tm_year = val - 1900;
+ want_century = 0;
+ want_xday = 1;
+ break;
+ case 'Z':
+ /* XXX How to handle this? */
+ break;
+ case 'E':
+#ifdef _NL_CURRENT
+ switch (*fmt++)
+ {
+ case 'c':
+ /* Match locale's alternate date and time format. */
+ if (*decided != raw)
+ {
+ const char *fmt = _NL_CURRENT (LC_TIME, ERA_D_T_FMT);
+
+ if (*fmt == '\0')
+ fmt = _NL_CURRENT (LC_TIME, D_T_FMT);
+
+ if (!recursive (fmt))
+ {
+ if (*decided == loc)
+ return NULL;
+ else
+ rp = rp_backup;
+ }
+ else
+ {
+ if (strcmp (fmt, HERE_D_T_FMT))
+ *decided = loc;
+ want_xday = 1;
+ break;
+ }
+ *decided = raw;
+ }
+ /* The C locale has no era information, so use the
+ normal representation. */
+ if (!recursive (HERE_D_T_FMT))
+ return NULL;
+ want_xday = 1;
+ break;
+ case 'C':
+ if (*decided != raw)
+ {
+ if (era_cnt >= 0)
+ {
+ era = _nl_select_era_entry (era_cnt);
+ if (match_string (era->era_name, rp))
+ {
+ *decided = loc;
+ break;
+ }
+ else
+ return NULL;
+ }
+ else
+ {
+ num_eras = _NL_CURRENT_WORD (LC_TIME,
+ _NL_TIME_ERA_NUM_ENTRIES);
+ for (era_cnt = 0; era_cnt < (int) num_eras;
+ ++era_cnt, rp = rp_backup)
+ {
+ era = _nl_select_era_entry (era_cnt);
+ if (match_string (era->era_name, rp))
+ {
+ *decided = loc;
+ break;
+ }
+ }
+ if (era_cnt == (int) num_eras)
+ {
+ era_cnt = -1;
+ if (*decided == loc)
+ return NULL;
+ }
+ else
+ break;
+ }
+
+ *decided = raw;
+ }
+ /* The C locale has no era information, so use the
+ normal representation. */
+ goto match_century;
+ case 'y':
+ if (*decided == raw)
+ goto match_year_in_century;
+
+ get_number (0, 9999, 4);
+ tm->tm_year = val;
+ want_era = 1;
+ want_xday = 1;
+ break;
+ case 'Y':
+ if (*decided != raw)
+ {
+ num_eras = _NL_CURRENT_WORD (LC_TIME,
+ _NL_TIME_ERA_NUM_ENTRIES);
+ for (era_cnt = 0; era_cnt < (int) num_eras;
+ ++era_cnt, rp = rp_backup)
+ {
+ era = _nl_select_era_entry (era_cnt);
+ if (recursive (era->era_format))
+ break;
+ }
+ if (era_cnt == (int) num_eras)
+ {
+ era_cnt = -1;
+ if (*decided == loc)
+ return NULL;
+ else
+ rp = rp_backup;
+ }
+ else
+ {
+ *decided = loc;
+ era_cnt = -1;
+ break;
+ }
+
+ *decided = raw;
+ }
+ get_number (0, 9999, 4);
+ tm->tm_year = val - 1900;
+ want_century = 0;
+ want_xday = 1;
+ break;
+ case 'x':
+ if (*decided != raw)
+ {
+ const char *fmt = _NL_CURRENT (LC_TIME, ERA_D_FMT);
+
+ if (*fmt == '\0')
+ fmt = _NL_CURRENT (LC_TIME, D_FMT);
+
+ if (!recursive (fmt))
+ {
+ if (*decided == loc)
+ return NULL;
+ else
+ rp = rp_backup;
+ }
+ else
+ {
+ if (strcmp (fmt, HERE_D_FMT))
+ *decided = loc;
+ break;
+ }
+ *decided = raw;
+ }
+ if (!recursive (HERE_D_FMT))
+ return NULL;
+ break;
+ case 'X':
+ if (*decided != raw)
+ {
+ const char *fmt = _NL_CURRENT (LC_TIME, ERA_T_FMT);
+
+ if (*fmt == '\0')
+ fmt = _NL_CURRENT (LC_TIME, T_FMT);
+
+ if (!recursive (fmt))
+ {
+ if (*decided == loc)
+ return NULL;
+ else
+ rp = rp_backup;
+ }
+ else
+ {
+ if (strcmp (fmt, HERE_T_FMT))
+ *decided = loc;
+ break;
+ }
+ *decided = raw;
+ }
+ if (!recursive (HERE_T_FMT))
+ return NULL;
+ break;
+ default:
+ return NULL;
+ }
+ break;
+#else
+ /* We have no information about the era format. Just use
+ the normal format. */
+ if (*fmt != 'c' && *fmt != 'C' && *fmt != 'y' && *fmt != 'Y'
+ && *fmt != 'x' && *fmt != 'X')
+ /* This is an illegal format. */
+ return NULL;
+
+ goto start_over;
+#endif
+ case 'O':
+ switch (*fmt++)
+ {
+ case 'd':
+ case 'e':
+ /* Match day of month using alternate numeric symbols. */
+ get_alt_number (1, 31, 2);
+ tm->tm_mday = val;
+ have_mday = 1;
+ want_xday = 1;
+ break;
+ case 'H':
+ /* Match hour in 24-hour clock using alternate numeric
+ symbols. */
+ get_alt_number (0, 23, 2);
+ tm->tm_hour = val;
+ have_I = 0;
+ break;
+ case 'I':
+ /* Match hour in 12-hour clock using alternate numeric
+ symbols. */
+ get_alt_number (1, 12, 2);
+ tm->tm_hour = val % 12;
+ have_I = 1;
+ break;
+ case 'm':
+ /* Match month using alternate numeric symbols. */
+ get_alt_number (1, 12, 2);
+ tm->tm_mon = val - 1;
+ have_mon = 1;
+ want_xday = 1;
+ break;
+ case 'M':
+ /* Match minutes using alternate numeric symbols. */
+ get_alt_number (0, 59, 2);
+ tm->tm_min = val;
+ break;
+ case 'S':
+ /* Match seconds using alternate numeric symbols. */
+ get_alt_number (0, 61, 2);
+ tm->tm_sec = val;
+ break;
+ case 'U':
+ case 'V':
+ case 'W':
+ get_alt_number (0, 53, 2);
+ /* XXX This cannot determine any field in TM without
+ further information. */
+ break;
+ case 'w':
+ /* Match number of weekday using alternate numeric symbols. */
+ get_alt_number (0, 6, 1);
+ tm->tm_wday = val;
+ have_wday = 1;
+ break;
+ case 'y':
+ /* Match year within century using alternate numeric symbols. */
+ get_alt_number (0, 99, 2);
+ tm->tm_year = val >= 69 ? val : val + 100;
+ want_xday = 1;
+ break;
+ default:
+ return NULL;
+ }
+ break;
+ default:
+ return NULL;
+ }
+ }
+
+ if (have_I && is_pm)
+ tm->tm_hour += 12;
+
+ if (century != -1)
+ {
+ if (want_century)
+ tm->tm_year = tm->tm_year % 100 + (century - 19) * 100;
+ else
+ /* Only the century, but not the year. Strange, but so be it. */
+ tm->tm_year = (century - 19) * 100;
+ }
+
+#ifdef _NL_CURRENT
+ if (era_cnt != -1)
+ {
+ era = _nl_select_era_entry (era_cnt);
+ if (want_era)
+ tm->tm_year = (era->start_date[0]
+ + ((tm->tm_year - era->offset)
+ * era->absolute_direction));
+ else
+ /* Era start year assumed. */
+ tm->tm_year = era->start_date[0];
+ }
+ else
+#endif
+ if (want_era)
+ return NULL;
+
+ if (want_xday && !have_wday)
+ {
+ if (!(have_mon && have_mday) && have_yday)
+ {
+ /* We don't have tm_mon and/or tm_mday, compute them. */
+ int t_mon = 0;
+ while (__mon_yday[__isleap(1900 + tm->tm_year)][t_mon] <= tm->tm_yday)
+ t_mon++;
+ if (!have_mon)
+ tm->tm_mon = t_mon - 1;
+ if (!have_mday)
+ tm->tm_mday =
+ (tm->tm_yday
+ - __mon_yday[__isleap(1900 + tm->tm_year)][t_mon - 1] + 1);
+ }
+ day_of_the_week (tm);
+ }
+ if (want_xday && !have_yday)
+ day_of_the_year (tm);
+
+ return discard_const_p(char, rp);
+}
+
+
+char *rep_strptime(const char *buf, const char *format, struct tm *tm)
+{
+ enum locale_status decided;
+
+#ifdef _NL_CURRENT
+ decided = not;
+#else
+ decided = raw;
+#endif
+ return strptime_internal (buf, format, tm, &decided, -1);
+}
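
The replacement behaves like POSIX strptime(3): it fills in only the fields
named by the format and returns a pointer to the first unconsumed character.
A minimal usage sketch follows (illustrative only; it assumes replace.h maps
strptime to rep_strptime on systems without a native implementation):

#include "replace.h"
#include <stdio.h>
#include <string.h>

int main(void)
{
	struct tm tm;
	const char *rest;

	memset(&tm, 0, sizeof(tm));
	rest = strptime("2015-04-30 23:48:36", "%Y-%m-%d %H:%M:%S", &tm);
	if (rest == NULL) {
		fprintf(stderr, "parse failed\n");
		return 1;
	}
	/* tm_year counts from 1900 and tm_mon from 0, as with mktime(3) */
	printf("year=%d mon=%d mday=%d %02d:%02d:%02d\n",
	       tm.tm_year + 1900, tm.tm_mon + 1, tm.tm_mday,
	       tm.tm_hour, tm.tm_min, tm.tm_sec);
	return 0;
}
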
diff --git a/lib/replace/system/README b/lib/replace/system/README
new file mode 100644
index 0000000..69a2b80
--- /dev/null
+++ b/lib/replace/system/README
@@ -0,0 +1,4 @@
+This directory contains wrappers around logical groups of system
+include files. The idea is to avoid #ifdef blocks in the main code,
+and instead put all the necessary conditional includes in subsystem
+specific header files in this directory.
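
As a sketch of the pattern (hypothetical caller, for illustration): instead
of every source file repeating

	#ifdef HAVE_SYS_SELECT_H
	#include <sys/select.h>
	#endif

a caller simply writes

	#include "replace.h"
	#include "system/select.h"	/* pulls in select/poll headers as available */

and the platform conditionals live in one place.
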
diff --git a/lib/replace/system/aio.h b/lib/replace/system/aio.h
new file mode 100644
index 0000000..784d77f
--- /dev/null
+++ b/lib/replace/system/aio.h
@@ -0,0 +1,32 @@
+#ifndef _system_aio_h
+#define _system_aio_h
+/*
+ Unix SMB/CIFS implementation.
+
+ AIO system include wrappers
+
+ Copyright (C) Andrew Tridgell 2006
+
+ ** NOTE! The following LGPL license applies to the replace
+ ** library. This does NOT imply that all of Samba is released
+ ** under the LGPL
+
+ This library is free software; you can redistribute it and/or
+ modify it under the terms of the GNU Lesser General Public
+ License as published by the Free Software Foundation; either
+ version 3 of the License, or (at your option) any later version.
+
+ This library is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ Lesser General Public License for more details.
+
+ You should have received a copy of the GNU Lesser General Public
+ License along with this library; if not, see <http://www.gnu.org/licenses/>.
+*/
+
+#ifdef HAVE_LIBAIO_H
+#include <libaio.h>
+#endif
+
+#endif
diff --git a/lib/replace/system/capability.h b/lib/replace/system/capability.h
new file mode 100644
index 0000000..a7b78f0
--- /dev/null
+++ b/lib/replace/system/capability.h
@@ -0,0 +1,55 @@
+#ifndef _system_capability_h
+#define _system_capability_h
+/*
+ Unix SMB/CIFS implementation.
+
+ capability system include wrappers
+
+ Copyright (C) Andrew Tridgell 2004
+
+ ** NOTE! The following LGPL license applies to the replace
+ ** library. This does NOT imply that all of Samba is released
+ ** under the LGPL
+
+ This library is free software; you can redistribute it and/or
+ modify it under the terms of the GNU Lesser General Public
+ License as published by the Free Software Foundation; either
+ version 3 of the License, or (at your option) any later version.
+
+ This library is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ Lesser General Public License for more details.
+
+ You should have received a copy of the GNU Lesser General Public
+ License along with this library; if not, see <http://www.gnu.org/licenses/>.
+*/
+
+#ifdef HAVE_SYS_CAPABILITY_H
+
+#if defined(BROKEN_REDHAT_7_SYSTEM_HEADERS) && !defined(_I386_STATFS_H) && !defined(_PPC_STATFS_H)
+#define _I386_STATFS_H
+#define _PPC_STATFS_H
+#define BROKEN_REDHAT_7_STATFS_WORKAROUND
+#endif
+
+#if defined(BROKEN_RHEL5_SYS_CAP_HEADER) && !defined(_LINUX_TYPES_H)
+#define BROKEN_RHEL5_SYS_CAP_HEADER_WORKAROUND
+#endif
+
+#include <sys/capability.h>
+
+#ifdef BROKEN_RHEL5_SYS_CAP_HEADER_WORKAROUND
+#undef _LINUX_TYPES_H
+#undef BROKEN_RHEL5_SYS_CAP_HEADER_WORKAROUND
+#endif
+
+#ifdef BROKEN_REDHAT_7_STATFS_WORKAROUND
+#undef _PPC_STATFS_H
+#undef _I386_STATFS_H
+#undef BROKEN_REDHAT_7_STATFS_WORKAROUND
+#endif
+
+#endif
+
+#endif
diff --git a/lib/replace/system/dir.h b/lib/replace/system/dir.h
new file mode 100644
index 0000000..dec2d54
--- /dev/null
+++ b/lib/replace/system/dir.h
@@ -0,0 +1,67 @@
+#ifndef _system_dir_h
+#define _system_dir_h
+/*
+ Unix SMB/CIFS implementation.
+
+ directory system include wrappers
+
+ Copyright (C) Andrew Tridgell 2004
+
+ ** NOTE! The following LGPL license applies to the replace
+ ** library. This does NOT imply that all of Samba is released
+ ** under the LGPL
+
+ This library is free software; you can redistribute it and/or
+ modify it under the terms of the GNU Lesser General Public
+ License as published by the Free Software Foundation; either
+ version 3 of the License, or (at your option) any later version.
+
+ This library is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ Lesser General Public License for more details.
+
+ You should have received a copy of the GNU Lesser General Public
+ License along with this library; if not, see <http://www.gnu.org/licenses/>.
+*/
+
+#if HAVE_DIRENT_H
+# include <dirent.h>
+# define NAMLEN(dirent) strlen((dirent)->d_name)
+#else
+# define dirent direct
+# define NAMLEN(dirent) (dirent)->d_namlen
+# if HAVE_SYS_NDIR_H
+# include <sys/ndir.h>
+# endif
+# if HAVE_SYS_DIR_H
+# include <sys/dir.h>
+# endif
+# if HAVE_NDIR_H
+# include <ndir.h>
+# endif
+#endif
+
+#ifndef HAVE_MKDIR_MODE
+#define mkdir(dir, mode) mkdir(dir)
+#endif
+
+/* Test whether a file name is the "." or ".." directory entry.
+ * These really should be inline functions.
+ */
+#ifndef ISDOT
+#define ISDOT(path) ( \
+ *((const char *)(path)) == '.' && \
+ *(((const char *)(path)) + 1) == '\0' \
+ )
+#endif
+
+#ifndef ISDOTDOT
+#define ISDOTDOT(path) ( \
+ *((const char *)(path)) == '.' && \
+ *(((const char *)(path)) + 1) == '.' && \
+ *(((const char *)(path)) + 2) == '\0' \
+ )
+#endif
+
+#endif
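
A short sketch of how these macros are meant to be used (hypothetical
helper; assumes a dirent-style API is available through this wrapper):

#include "replace.h"
#include "system/dir.h"
#include <stdio.h>
#include <string.h>

/* Print every entry in a directory except "." and ".." */
static void list_dir(const char *path)
{
	DIR *d = opendir(path);
	struct dirent *de;

	if (d == NULL) {
		return;
	}
	while ((de = readdir(d)) != NULL) {
		if (ISDOT(de->d_name) || ISDOTDOT(de->d_name)) {
			continue;	/* skip self and parent entries */
		}
		printf("%s (name length %u)\n", de->d_name,
		       (unsigned)NAMLEN(de));
	}
	closedir(d);
}
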
diff --git a/lib/replace/system/filesys.h b/lib/replace/system/filesys.h
new file mode 100644
index 0000000..bd6e6a0
--- /dev/null
+++ b/lib/replace/system/filesys.h
@@ -0,0 +1,269 @@
+#ifndef _system_filesys_h
+#define _system_filesys_h
+/*
+ Unix SMB/CIFS implementation.
+
+ filesystem system include wrappers
+
+ Copyright (C) Andrew Tridgell 2004
+
+ ** NOTE! The following LGPL license applies to the replace
+ ** library. This does NOT imply that all of Samba is released
+ ** under the LGPL
+
+ This library is free software; you can redistribute it and/or
+ modify it under the terms of the GNU Lesser General Public
+ License as published by the Free Software Foundation; either
+ version 3 of the License, or (at your option) any later version.
+
+ This library is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ Lesser General Public License for more details.
+
+ You should have received a copy of the GNU Lesser General Public
+ License along with this library; if not, see <http://www.gnu.org/licenses/>.
+
+*/
+
+#ifdef HAVE_UNISTD_H
+#include <unistd.h>
+#endif
+
+#include <sys/stat.h>
+
+#ifdef HAVE_SYS_PARAM_H
+#include <sys/param.h>
+#endif
+
+#ifdef HAVE_SYS_MOUNT_H
+#include <sys/mount.h>
+#endif
+
+#ifdef HAVE_MNTENT_H
+#include <mntent.h>
+#endif
+
+#ifdef HAVE_SYS_VFS_H
+#include <sys/vfs.h>
+#endif
+
+#ifdef HAVE_SYS_ACL_H
+#include <sys/acl.h>
+#endif
+
+#ifdef HAVE_ACL_LIBACL_H
+#include <acl/libacl.h>
+#endif
+
+#ifdef HAVE_SYS_FS_S5PARAM_H
+#include <sys/fs/s5param.h>
+#endif
+
+#if defined (HAVE_SYS_FILSYS_H) && !defined (_CRAY)
+#include <sys/filsys.h>
+#endif
+
+#ifdef HAVE_SYS_STATFS_H
+# include <sys/statfs.h>
+#endif
+
+#ifdef HAVE_DUSTAT_H
+#include <sys/dustat.h>
+#endif
+
+#ifdef HAVE_SYS_STATVFS_H
+#include <sys/statvfs.h>
+#endif
+
+#ifdef HAVE_SYS_FILIO_H
+#include <sys/filio.h>
+#endif
+
+#ifdef HAVE_SYS_FILE_H
+#include <sys/file.h>
+#endif
+
+#ifdef HAVE_FCNTL_H
+#include <fcntl.h>
+#else
+#ifdef HAVE_SYS_FCNTL_H
+#include <sys/fcntl.h>
+#endif
+#endif
+
+#ifdef HAVE_SYS_MODE_H
+/* apparently AIX needs this for S_ISLNK */
+#ifndef S_ISLNK
+#include <sys/mode.h>
+#endif
+#endif
+
+#ifdef HAVE_SYS_IOCTL_H
+#include <sys/ioctl.h>
+#endif
+
+#ifdef HAVE_SYS_UIO_H
+#include <sys/uio.h>
+#endif
+
+/*
+ * Veritas File System, often installed in addition to the
+ * native filesystem. Its quota interface is different.
+ */
+#if defined(HAVE_SYS_FS_VX_QUOTA_H)
+#define VXFS_QUOTA
+#endif
+
+#if HAVE_SYS_ATTRIBUTES_H
+#include <sys/attributes.h>
+#elif HAVE_ATTR_ATTRIBUTES_H
+#include <attr/attributes.h>
+#endif
+
+/* mutually exclusive (SuSE 8.2) */
+#if HAVE_ATTR_XATTR_H
+#include <attr/xattr.h>
+#elif HAVE_SYS_XATTR_H
+#include <sys/xattr.h>
+#endif
+
+#ifdef HAVE_SYS_EA_H
+#include <sys/ea.h>
+#endif
+
+#ifdef HAVE_SYS_EXTATTR_H
+#include <sys/extattr.h>
+#endif
+
+#ifdef HAVE_SYS_RESOURCE_H
+#include <sys/resource.h>
+#endif
+
+#ifndef XATTR_CREATE
+#define XATTR_CREATE 0x1 /* set value, fail if attr already exists */
+#endif
+
+#ifndef XATTR_REPLACE
+#define XATTR_REPLACE 0x2 /* set value, fail if attr does not exist */
+#endif
+
+/* Some POSIX definitions for those without */
+
+#ifndef S_IFDIR
+#define S_IFDIR 0x4000
+#endif
+#ifndef S_ISDIR
+#define S_ISDIR(mode) ((mode & 0xF000) == S_IFDIR)
+#endif
+#ifndef S_IRWXU
+#define S_IRWXU 00700 /* read, write, execute: owner */
+#endif
+#ifndef S_IRUSR
+#define S_IRUSR 00400 /* read permission: owner */
+#endif
+#ifndef S_IWUSR
+#define S_IWUSR 00200 /* write permission: owner */
+#endif
+#ifndef S_IXUSR
+#define S_IXUSR 00100 /* execute permission: owner */
+#endif
+#ifndef S_IRWXG
+#define S_IRWXG 00070 /* read, write, execute: group */
+#endif
+#ifndef S_IRGRP
+#define S_IRGRP 00040 /* read permission: group */
+#endif
+#ifndef S_IWGRP
+#define S_IWGRP 00020 /* write permission: group */
+#endif
+#ifndef S_IXGRP
+#define S_IXGRP 00010 /* execute permission: group */
+#endif
+#ifndef S_IRWXO
+#define S_IRWXO 00007 /* read, write, execute: other */
+#endif
+#ifndef S_IROTH
+#define S_IROTH 00004 /* read permission: other */
+#endif
+#ifndef S_IWOTH
+#define S_IWOTH 00002 /* write permission: other */
+#endif
+#ifndef S_IXOTH
+#define S_IXOTH 00001 /* execute permission: other */
+#endif
+
+#ifndef O_ACCMODE
+#define O_ACCMODE (O_RDONLY | O_WRONLY | O_RDWR)
+#endif
+
+#ifndef MAXPATHLEN
+#define MAXPATHLEN 256
+#endif
+
+#ifndef SEEK_SET
+#define SEEK_SET 0
+#endif
+
+#ifdef _WIN32
+#define mkdir(d,m) _mkdir(d)
+#endif
+
+/*
+ this allows us to use uniform error handling for our xattr
+ wrappers
+*/
+#ifndef ENOATTR
+#define ENOATTR ENODATA
+#endif
+
+
+#if !defined(HAVE_GETXATTR) || defined(XATTR_ADDITIONAL_OPTIONS)
+ssize_t rep_getxattr (const char *path, const char *name, void *value, size_t size);
+#define getxattr(path, name, value, size) rep_getxattr(path, name, value, size)
+/* define is in "replace.h" */
+#endif
+
+#if !defined(HAVE_FGETXATTR) || defined(XATTR_ADDITIONAL_OPTIONS)
+ssize_t rep_fgetxattr (int filedes, const char *name, void *value, size_t size);
+#define fgetxattr(filedes, name, value, size) rep_fgetxattr(filedes, name, value, size)
+/* define is in "replace.h" */
+#endif
+
+#if !defined(HAVE_LISTXATTR) || defined(XATTR_ADDITIONAL_OPTIONS)
+ssize_t rep_listxattr (const char *path, char *list, size_t size);
+#define listxattr(path, list, size) rep_listxattr(path, list, size)
+/* define is in "replace.h" */
+#endif
+
+#if !defined(HAVE_FLISTXATTR) || defined(XATTR_ADDITIONAL_OPTIONS)
+ssize_t rep_flistxattr (int filedes, char *list, size_t size);
+#define flistxattr(filedes, value, size) rep_flistxattr(filedes, value, size)
+/* define is in "replace.h" */
+#endif
+
+#if !defined(HAVE_REMOVEXATTR) || defined(XATTR_ADDITIONAL_OPTIONS)
+int rep_removexattr (const char *path, const char *name);
+#define removexattr(path, name) rep_removexattr(path, name)
+/* define is in "replace.h" */
+#endif
+
+#if !defined(HAVE_FREMOVEXATTR) || defined(XATTR_ADDITIONAL_OPTIONS)
+int rep_fremovexattr (int filedes, const char *name);
+#define fremovexattr(filedes, name) rep_fremovexattr(filedes, name)
+/* define is in "replace.h" */
+#endif
+
+#if !defined(HAVE_SETXATTR) || defined(XATTR_ADDITIONAL_OPTIONS)
+int rep_setxattr (const char *path, const char *name, const void *value, size_t size, int flags);
+#define setxattr(path, name, value, size, flags) rep_setxattr(path, name, value, size, flags)
+/* define is in "replace.h" */
+#endif
+
+#if !defined(HAVE_FSETXATTR) || defined(XATTR_ADDITIONAL_OPTIONS)
+int rep_fsetxattr (int filedes, const char *name, const void *value, size_t size, int flags);
+#define fsetxattr(filedes, name, value, size, flags) rep_fsetxattr(filedes, name, value, size, flags)
+/* define is in "replace.h" */
+#endif
+
+#endif
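
The point of the ENOATTR fallback and the rep_*xattr declarations is that
callers get one code path everywhere. A sketch (hypothetical helper and
attribute name; assumes an xattr-capable build):

#include "replace.h"
#include "system/filesys.h"
#include <errno.h>
#include <stdio.h>

static void show_comment(const char *path)
{
	char buf[256];
	ssize_t len = getxattr(path, "user.comment", buf, sizeof(buf));

	if (len < 0) {
		/* the same check works whether the OS spells the error
		   ENOATTR or ENODATA */
		if (errno == ENOATTR) {
			printf("%s: no user.comment attribute\n", path);
		} else {
			perror("getxattr");
		}
		return;
	}
	printf("%s: %.*s\n", path, (int)len, buf);
}
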
diff --git a/lib/replace/system/glob.h b/lib/replace/system/glob.h
new file mode 100644
index 0000000..3e23db6
--- /dev/null
+++ b/lib/replace/system/glob.h
@@ -0,0 +1,37 @@
+#ifndef _system_glob_h
+#define _system_glob_h
+/*
+ Unix SMB/CIFS implementation.
+
+ glob system include wrappers
+
+ Copyright (C) Andrew Tridgell 2004
+
+ ** NOTE! The following LGPL license applies to the replace
+ ** library. This does NOT imply that all of Samba is released
+ ** under the LGPL
+
+ This library is free software; you can redistribute it and/or
+ modify it under the terms of the GNU Lesser General Public
+ License as published by the Free Software Foundation; either
+ version 3 of the License, or (at your option) any later version.
+
+ This library is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ Lesser General Public License for more details.
+
+ You should have received a copy of the GNU Lesser General Public
+ License along with this library; if not, see <http://www.gnu.org/licenses/>.
+
+*/
+
+#ifdef HAVE_GLOB_H
+#include <glob.h>
+#endif
+
+#ifdef HAVE_FNMATCH_H
+#include <fnmatch.h>
+#endif
+
+#endif
diff --git a/lib/replace/system/gssapi.h b/lib/replace/system/gssapi.h
new file mode 100644
index 0000000..6386c7b
--- /dev/null
+++ b/lib/replace/system/gssapi.h
@@ -0,0 +1,53 @@
+#ifndef _system_gssapi_h
+#define _system_gssapi_h
+
+/*
+ Unix SMB/CIFS implementation.
+
+ GSSAPI system include wrappers
+
+ Copyright (C) Andrew Tridgell 2004
+
+ ** NOTE! The following LGPL license applies to the replace
+ ** library. This does NOT imply that all of Samba is released
+ ** under the LGPL
+
+ This library is free software; you can redistribute it and/or
+ modify it under the terms of the GNU Lesser General Public
+ License as published by the Free Software Foundation; either
+ version 3 of the License, or (at your option) any later version.
+
+ This library is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ Lesser General Public License for more details.
+
+ You should have received a copy of the GNU Lesser General Public
+ License along with this library; if not, see <http://www.gnu.org/licenses/>.
+
+*/
+
+#ifdef HAVE_GSSAPI
+
+#ifdef HAVE_GSSAPI_GSSAPI_EXT_H
+#include <gssapi/gssapi_ext.h>
+#elif HAVE_GSSAPI_GSSAPI_H
+#include <gssapi/gssapi.h>
+#elif HAVE_GSSAPI_GSSAPI_GENERIC_H
+#include <gssapi/gssapi_generic.h>
+#elif HAVE_GSSAPI_H
+#include <gssapi.h>
+#endif
+
+#if HAVE_GSSAPI_GSSAPI_KRB5_H
+#include <gssapi/gssapi_krb5.h>
+#endif
+
+#if HAVE_GSSAPI_GSSAPI_SPNEGO_H
+#include <gssapi/gssapi_spnego.h>
+#elif HAVE_GSSAPI_SPNEGO_H
+#include <gssapi_spnego.h>
+#endif
+
+#endif
+#endif
diff --git a/lib/replace/system/iconv.h b/lib/replace/system/iconv.h
new file mode 100644
index 0000000..3c8a71f
--- /dev/null
+++ b/lib/replace/system/iconv.h
@@ -0,0 +1,57 @@
+#ifndef _system_iconv_h
+#define _system_iconv_h
+/*
+ Unix SMB/CIFS implementation.
+
+ iconv memory system include wrappers
+
+ Copyright (C) Andrew Tridgell 2004
+
+ ** NOTE! The following LGPL license applies to the replace
+ ** library. This does NOT imply that all of Samba is released
+ ** under the LGPL
+
+ This library is free software; you can redistribute it and/or
+ modify it under the terms of the GNU Lesser General Public
+ License as published by the Free Software Foundation; either
+ version 3 of the License, or (at your option) any later version.
+
+ This library is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ Lesser General Public License for more details.
+
+ You should have received a copy of the GNU Lesser General Public
+ License along with this library; if not, see <http://www.gnu.org/licenses/>.
+
+*/
+
+#if !defined(HAVE_ICONV) && defined(HAVE_ICONV_H)
+#define HAVE_ICONV
+#endif
+
+#if !defined(HAVE_GICONV) && defined(HAVE_GICONV_H)
+#define HAVE_GICONV
+#endif
+
+#if !defined(HAVE_BICONV) && defined(HAVE_BICONV_H)
+#define HAVE_BICONV
+#endif
+
+#ifdef HAVE_NATIVE_ICONV
+#if defined(HAVE_ICONV)
+#include <iconv.h>
+#elif defined(HAVE_GICONV)
+#include <giconv.h>
+#elif defined(HAVE_BICONV)
+#include <biconv.h>
+#endif
+#endif /* HAVE_NATIVE_ICONV */
+
+/* needed for some systems without iconv. Doesn't really matter
+ what error code we use */
+#ifndef EILSEQ
+#define EILSEQ EIO
+#endif
+
+#endif
diff --git a/lib/replace/system/kerberos.h b/lib/replace/system/kerberos.h
new file mode 100644
index 0000000..636ce0f
--- /dev/null
+++ b/lib/replace/system/kerberos.h
@@ -0,0 +1,41 @@
+#ifndef _system_kerberos_h
+#define _system_kerberos_h
+
+/*
+ Unix SMB/CIFS implementation.
+
+ kerberos system include wrappers
+
+ Copyright (C) Andrew Tridgell 2004
+
+ ** NOTE! The following LGPL license applies to the replace
+ ** library. This does NOT imply that all of Samba is released
+ ** under the LGPL
+
+ This library is free software; you can redistribute it and/or
+ modify it under the terms of the GNU Lesser General Public
+ License as published by the Free Software Foundation; either
+ version 3 of the License, or (at your option) any later version.
+
+ This library is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ Lesser General Public License for more details.
+
+ You should have received a copy of the GNU Lesser General Public
+ License along with this library; if not, see <http://www.gnu.org/licenses/>.
+
+*/
+
+#ifdef HAVE_KRB5
+
+#if HAVE_KRB5_H
+#include <krb5.h>
+#endif
+
+#if HAVE_COM_ERR_H
+#include <com_err.h>
+#endif
+
+#endif
+#endif
diff --git a/lib/replace/system/locale.h b/lib/replace/system/locale.h
new file mode 100644
index 0000000..504a3bb
--- /dev/null
+++ b/lib/replace/system/locale.h
@@ -0,0 +1,42 @@
+#ifndef _system_locale_h
+#define _system_locale_h
+
+/*
+ Unix SMB/CIFS implementation.
+
+ locale include wrappers
+
+ Copyright (C) Andrew Tridgell 2004
+
+ ** NOTE! The following LGPL license applies to the replace
+ ** library. This does NOT imply that all of Samba is released
+ ** under the LGPL
+
+ This library is free software; you can redistribute it and/or
+ modify it under the terms of the GNU Lesser General Public
+ License as published by the Free Software Foundation; either
+ version 3 of the License, or (at your option) any later version.
+
+ This library is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ Lesser General Public License for more details.
+
+ You should have received a copy of the GNU Lesser General Public
+ License along with this library; if not, see <http://www.gnu.org/licenses/>.
+
+*/
+
+#ifdef HAVE_CTYPE_H
+#include <ctype.h>
+#endif
+
+#ifdef HAVE_LOCALE_H
+#include <locale.h>
+#endif
+
+#ifdef HAVE_LANGINFO_H
+#include <langinfo.h>
+#endif
+
+#endif
diff --git a/lib/replace/system/network.h b/lib/replace/system/network.h
new file mode 100644
index 0000000..4a3fd07
--- /dev/null
+++ b/lib/replace/system/network.h
@@ -0,0 +1,368 @@
+#ifndef _system_network_h
+#define _system_network_h
+/*
+ Unix SMB/CIFS implementation.
+
+ networking system include wrappers
+
+ Copyright (C) Andrew Tridgell 2004
+ Copyright (C) Jelmer Vernooij 2007
+
+ ** NOTE! The following LGPL license applies to the replace
+ ** library. This does NOT imply that all of Samba is released
+ ** under the LGPL
+
+ This library is free software; you can redistribute it and/or
+ modify it under the terms of the GNU Lesser General Public
+ License as published by the Free Software Foundation; either
+ version 3 of the License, or (at your option) any later version.
+
+ This library is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ Lesser General Public License for more details.
+
+ You should have received a copy of the GNU Lesser General Public
+ License along with this library; if not, see <http://www.gnu.org/licenses/>.
+
+*/
+
+#ifndef LIBREPLACE_NETWORK_CHECKS
+#error "AC_LIBREPLACE_NETWORK_CHECKS missing in configure"
+#endif
+
+#ifdef HAVE_UNISTD_H
+#include <unistd.h>
+#endif
+
+#ifdef HAVE_SYS_SOCKET_H
+#include <sys/socket.h>
+#endif
+
+#ifdef HAVE_UNIXSOCKET
+#include <sys/un.h>
+#endif
+
+#ifdef HAVE_NETINET_IN_H
+#include <netinet/in.h>
+#endif
+#ifdef HAVE_ARPA_INET_H
+#include <arpa/inet.h>
+#endif
+
+#ifdef HAVE_NETDB_H
+#include <netdb.h>
+#endif
+
+#ifdef HAVE_NETINET_TCP_H
+#include <netinet/tcp.h>
+#endif
+
+/*
+ * The next three includes are needed to access the IPTOS_* options
+ * on some systems.
+ */
+
+#ifdef HAVE_NETINET_IN_SYSTM_H
+#include <netinet/in_systm.h>
+#endif
+
+#ifdef HAVE_NETINET_IN_IP_H
+#include <netinet/in_ip.h>
+#endif
+
+#ifdef HAVE_NETINET_IP_H
+#include <netinet/ip.h>
+#endif
+
+#ifdef HAVE_NET_IF_H
+#include <net/if.h>
+#endif
+
+#ifdef HAVE_SYS_IOCTL_H
+#include <sys/ioctl.h>
+#endif
+
+#ifdef HAVE_SYS_UIO_H
+#include <sys/uio.h>
+#endif
+
+#ifdef HAVE_STROPTS_H
+#include <stropts.h>
+#endif
+
+#ifndef HAVE_SOCKLEN_T
+#define HAVE_SOCKLEN_T
+typedef int socklen_t;
+#endif
+
+#if !defined (HAVE_INET_NTOA) || defined(REPLACE_INET_NTOA)
+/* define is in "replace.h" */
+char *rep_inet_ntoa(struct in_addr ip);
+#endif
+
+#ifndef HAVE_INET_PTON
+/* define is in "replace.h" */
+int rep_inet_pton(int af, const char *src, void *dst);
+#endif
+
+#ifndef HAVE_INET_NTOP
+/* define is in "replace.h" */
+const char *rep_inet_ntop(int af, const void *src, char *dst, socklen_t size);
+#endif
+
+#ifndef HAVE_INET_ATON
+/* define is in "replace.h" */
+int rep_inet_aton(const char *src, struct in_addr *dst);
+#endif
+
+#ifndef HAVE_CONNECT
+/* define is in "replace.h" */
+int rep_connect(int sockfd, const struct sockaddr *addr, socklen_t addrlen);
+#endif
+
+#ifndef HAVE_GETHOSTBYNAME
+/* define is in "replace.h" */
+struct hostent *rep_gethostbyname(const char *name);
+#endif
+
+#ifdef HAVE_IFADDRS_H
+#include <ifaddrs.h>
+#endif
+
+#ifndef HAVE_STRUCT_IFADDRS
+struct ifaddrs {
+ struct ifaddrs *ifa_next; /* Pointer to next struct */
+ char *ifa_name; /* Interface name */
+ unsigned int ifa_flags; /* Interface flags */
+ struct sockaddr *ifa_addr; /* Interface address */
+ struct sockaddr *ifa_netmask; /* Interface netmask */
+#undef ifa_dstaddr
+ struct sockaddr *ifa_dstaddr; /* P2P interface destination */
+ void *ifa_data; /* Address specific data */
+};
+#endif
+
+#ifndef HAVE_GETIFADDRS
+int rep_getifaddrs(struct ifaddrs **);
+#endif
+
+#ifndef HAVE_FREEIFADDRS
+void rep_freeifaddrs(struct ifaddrs *);
+#endif
+
+#ifndef HAVE_SOCKETPAIR
+/* define is in "replace.h" */
+int rep_socketpair(int d, int type, int protocol, int sv[2]);
+#endif
+
+/*
+ * Some systems have getaddrinfo but not the
+ * defines needed to use it.
+ */
+
+/* Various macros that ought to be in <netdb.h>, but might not be */
+
+#ifndef EAI_FAIL
+#define EAI_BADFLAGS (-1)
+#define EAI_NONAME (-2)
+#define EAI_AGAIN (-3)
+#define EAI_FAIL (-4)
+#define EAI_FAMILY (-6)
+#define EAI_SOCKTYPE (-7)
+#define EAI_SERVICE (-8)
+#define EAI_MEMORY (-10)
+#define EAI_SYSTEM (-11)
+#endif /* !EAI_FAIL */
+
+#ifndef AI_PASSIVE
+#define AI_PASSIVE 0x0001
+#endif
+
+#ifndef AI_CANONNAME
+#define AI_CANONNAME 0x0002
+#endif
+
+#ifndef AI_NUMERICHOST
+/*
+ * some platforms don't support AI_NUMERICHOST; define as zero if using
+ * the system version of getaddrinfo...
+ */
+#if defined(HAVE_STRUCT_ADDRINFO) && defined(HAVE_GETADDRINFO)
+#define AI_NUMERICHOST 0
+#else
+#define AI_NUMERICHOST 0x0004
+#endif
+#endif
+
+/*
+ * Some of the functions in source3/lib/util_sock.c use AI_ADDRCONFIG. On QNX
+ * 6.3.0, this macro is defined but, if it's used, getaddrinfo will fail. This
+ * prevents smbd from opening any sockets.
+ *
+ * Undefining AI_ADDRCONFIG on such systems and redefining it as 0
+ * works around the issue.
+ */
+#ifdef __QNX__
+#include <sys/neutrino.h>
+#if _NTO_VERSION == 630
+#undef AI_ADDRCONFIG
+#endif
+#endif
+#ifndef AI_ADDRCONFIG
+/*
+ * logic copied from AI_NUMERICHOST
+ */
+#if defined(HAVE_STRUCT_ADDRINFO) && defined(HAVE_GETADDRINFO)
+#define AI_ADDRCONFIG 0
+#else
+#define AI_ADDRCONFIG 0x0020
+#endif
+#endif
+
+#ifndef AI_NUMERICSERV
+/*
+ * logic copied from AI_NUMERICHOST
+ */
+#if defined(HAVE_STRUCT_ADDRINFO) && defined(HAVE_GETADDRINFO)
+#define AI_NUMERICSERV 0
+#else
+#define AI_NUMERICSERV 0x0400
+#endif
+#endif
+
+#ifndef NI_NUMERICHOST
+#define NI_NUMERICHOST 1
+#endif
+
+#ifndef NI_NUMERICSERV
+#define NI_NUMERICSERV 2
+#endif
+
+#ifndef NI_NOFQDN
+#define NI_NOFQDN 4
+#endif
+
+#ifndef NI_NAMEREQD
+#define NI_NAMEREQD 8
+#endif
+
+#ifndef NI_DGRAM
+#define NI_DGRAM 16
+#endif
+
+
+#ifndef NI_MAXHOST
+#define NI_MAXHOST 1025
+#endif
+
+#ifndef NI_MAXSERV
+#define NI_MAXSERV 32
+#endif
+
+/*
+ * glibc on Linux doesn't seem to have MSG_WAITALL
+ * defined, although the kernel appears to support it.
+ */
+#ifndef MSG_WAITALL
+#define MSG_WAITALL 0
+#endif
+
+#ifndef INADDR_LOOPBACK
+#define INADDR_LOOPBACK 0x7f000001
+#endif
+
+#ifndef INADDR_NONE
+#define INADDR_NONE 0xffffffff
+#endif
+
+#ifndef EAFNOSUPPORT
+#define EAFNOSUPPORT EINVAL
+#endif
+
+#ifndef INET_ADDRSTRLEN
+#define INET_ADDRSTRLEN 16
+#endif
+
+#ifndef INET6_ADDRSTRLEN
+#define INET6_ADDRSTRLEN 46
+#endif
+
+#ifndef HOST_NAME_MAX
+#define HOST_NAME_MAX 255
+#endif
+
+#ifndef MAXHOSTNAMELEN
+#define MAXHOSTNAMELEN HOST_NAME_MAX
+#endif
+
+#ifndef HAVE_SA_FAMILY_T
+#define HAVE_SA_FAMILY_T
+typedef unsigned short int sa_family_t;
+#endif
+
+#ifndef HAVE_STRUCT_SOCKADDR_STORAGE
+#define HAVE_STRUCT_SOCKADDR_STORAGE
+#ifdef HAVE_STRUCT_SOCKADDR_IN6
+#define sockaddr_storage sockaddr_in6
+#define ss_family sin6_family
+#define HAVE_SS_FAMILY 1
+#else /*HAVE_STRUCT_SOCKADDR_IN6*/
+#define sockaddr_storage sockaddr_in
+#define ss_family sin_family
+#define HAVE_SS_FAMILY 1
+#endif /*HAVE_STRUCT_SOCKADDR_IN6*/
+#endif /*HAVE_STRUCT_SOCKADDR_STORAGE*/
+
+#ifndef HAVE_SS_FAMILY
+#ifdef HAVE___SS_FAMILY
+#define ss_family __ss_family
+#define HAVE_SS_FAMILY 1
+#endif
+#endif
+
+#ifndef IOV_MAX
+# ifdef UIO_MAXIOV
+# define IOV_MAX UIO_MAXIOV
+# else
+# ifdef __sgi
+ /*
+ * IRIX 6.5 has sysconf(_SC_IOV_MAX)
+ * which might return 512 or bigger
+ */
+# define IOV_MAX 512
+# endif
+# endif
+#endif
+
+#ifndef HAVE_STRUCT_ADDRINFO
+#define HAVE_STRUCT_ADDRINFO
+struct addrinfo {
+ int ai_flags;
+ int ai_family;
+ int ai_socktype;
+ int ai_protocol;
+ socklen_t ai_addrlen;
+ struct sockaddr *ai_addr;
+ char *ai_canonname;
+ struct addrinfo *ai_next;
+};
+#endif /* HAVE_STRUCT_ADDRINFO */
+
+#if !defined(HAVE_GETADDRINFO)
+#include "getaddrinfo.h"
+#endif
+
+/* Needed for some systems that don't define it (Solaris). */
+#ifndef ifr_netmask
+#define ifr_netmask ifr_addr
+#endif
+
+/* Some old Linux systems have broken header files */
+#ifdef HAVE_IPV6
+#ifdef HAVE_LINUX_IPV6_V6ONLY_26
+#define IPV6_V6ONLY 26
+#endif /* HAVE_LINUX_IPV6_V6ONLY_26 */
+#endif /* HAVE_IPV6 */
+
+#endif
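
Because the wrapper guarantees that the AI_* and NI_* constants exist
(defined as 0 where a native getaddrinfo would not understand them),
address lookups compile unchanged across platforms. A sketch (hypothetical
helper; assumes getaddrinfo is available natively or via the bundled
replacement):

#include "replace.h"
#include "system/network.h"
#include <stdio.h>
#include <string.h>

static int lookup(const char *host, const char *port)
{
	struct addrinfo hints, *res = NULL;
	char addr[NI_MAXHOST];
	int ret;

	memset(&hints, 0, sizeof(hints));
	hints.ai_family = AF_UNSPEC;
	hints.ai_socktype = SOCK_STREAM;
	hints.ai_flags = AI_ADDRCONFIG;	/* harmlessly 0 on QNX 6.3.0, see above */

	ret = getaddrinfo(host, port, &hints, &res);
	if (ret != 0) {
		fprintf(stderr, "getaddrinfo: %s\n", gai_strerror(ret));
		return -1;
	}
	ret = getnameinfo(res->ai_addr, res->ai_addrlen,
			  addr, sizeof(addr), NULL, 0, NI_NUMERICHOST);
	if (ret == 0) {
		printf("%s -> %s\n", host, addr);
	}
	freeaddrinfo(res);
	return ret;
}
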
diff --git a/lib/replace/system/passwd.h b/lib/replace/system/passwd.h
new file mode 100644
index 0000000..ecc9f60
--- /dev/null
+++ b/lib/replace/system/passwd.h
@@ -0,0 +1,92 @@
+#ifndef _system_passwd_h
+#define _system_passwd_h
+
+/*
+ Unix SMB/CIFS implementation.
+
+ passwd system include wrappers
+
+ Copyright (C) Andrew Tridgell 2004
+
+ ** NOTE! The following LGPL license applies to the replace
+ ** library. This does NOT imply that all of Samba is released
+ ** under the LGPL
+
+ This library is free software; you can redistribute it and/or
+ modify it under the terms of the GNU Lesser General Public
+ License as published by the Free Software Foundation; either
+ version 3 of the License, or (at your option) any later version.
+
+ This library is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ Lesser General Public License for more details.
+
+ You should have received a copy of the GNU Lesser General Public
+ License along with this library; if not, see <http://www.gnu.org/licenses/>.
+
+*/
+
+#ifdef HAVE_UNISTD_H
+#include <unistd.h>
+#endif
+
+#ifdef HAVE_PWD_H
+#include <pwd.h>
+#endif
+#ifdef HAVE_GRP_H
+#include <grp.h>
+#endif
+#ifdef HAVE_SYS_PRIV_H
+#include <sys/priv.h>
+#endif
+#ifdef HAVE_SYS_ID_H
+#include <sys/id.h>
+#endif
+
+#ifdef HAVE_CRYPT_H
+#include <crypt.h>
+#endif
+
+#ifdef HAVE_SHADOW_H
+#include <shadow.h>
+#endif
+
+#ifdef HAVE_SYS_SECURITY_H
+#include <sys/security.h>
+#include <prot.h>
+#define PASSWORD_LENGTH 16
+#endif /* HAVE_SYS_SECURITY_H */
+
+#ifdef HAVE_GETPWANAM
+#include <sys/label.h>
+#include <sys/audit.h>
+#include <pwdadj.h>
+#endif
+
+#ifdef HAVE_COMPAT_H
+#include <compat.h>
+#endif
+
+#ifndef NGROUPS_MAX
+#define NGROUPS_MAX 32 /* Guess... */
+#endif
+
+/* the longest significant password length on the system;
+ knowing this speeds up password searches a lot */
+#ifndef PASSWORD_LENGTH
+#define PASSWORD_LENGTH 8
+#endif
+
+
+#ifndef ALLOW_CHANGE_PASSWORD
+#if (defined(HAVE_TERMIOS_H) && defined(HAVE_DUP2) && defined(HAVE_SETSID))
+#define ALLOW_CHANGE_PASSWORD 1
+#endif
+#endif
+
+#if defined(HAVE_CRYPT16) && defined(HAVE_GETAUTHUID)
+#define ULTRIX_AUTH 1
+#endif
+
+#endif
diff --git a/lib/replace/system/readline.h b/lib/replace/system/readline.h
new file mode 100644
index 0000000..e6b8fb9
--- /dev/null
+++ b/lib/replace/system/readline.h
@@ -0,0 +1,58 @@
+#ifndef _system_readline_h
+#define _system_readline_h
+/*
+ Unix SMB/CIFS implementation.
+
+ Readline wrappers
+
+ ** NOTE! The following LGPL license applies to the replace
+ ** library. This does NOT imply that all of Samba is released
+ ** under the LGPL
+
+ This library is free software; you can redistribute it and/or
+ modify it under the terms of the GNU Lesser General Public
+ License as published by the Free Software Foundation; either
+ version 3 of the License, or (at your option) any later version.
+
+ This library is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ Lesser General Public License for more details.
+
+ You should have received a copy of the GNU Lesser General Public
+ License along with this library; if not, see <http://www.gnu.org/licenses/>.
+
+*/
+
+#ifdef HAVE_LIBREADLINE
+# ifdef HAVE_READLINE_READLINE_H
+# include <readline/readline.h>
+# ifdef HAVE_READLINE_HISTORY_H
+# include <readline/history.h>
+# endif
+# else
+# ifdef HAVE_READLINE_H
+# include <readline.h>
+# ifdef HAVE_HISTORY_H
+# include <history.h>
+# endif
+# else
+# undef HAVE_LIBREADLINE
+# endif
+# endif
+#endif
+
+#ifdef HAVE_NEW_LIBREADLINE
+#ifdef HAVE_CPPFUNCTION
+# define RL_COMPLETION_CAST (CPPFunction *)
+#elif HAVE_RL_COMPLETION_T
+# define RL_COMPLETION_CAST (rl_completion_t *)
+#else
+# define RL_COMPLETION_CAST
+#endif
+#else
+/* This type is missing from libreadline<4.0 (approximately) */
+# define RL_COMPLETION_CAST
+#endif /* HAVE_NEW_LIBREADLINE */
+
+#endif
diff --git a/lib/replace/system/select.h b/lib/replace/system/select.h
new file mode 100644
index 0000000..9e945c3
--- /dev/null
+++ b/lib/replace/system/select.h
@@ -0,0 +1,81 @@
+#ifndef _system_select_h
+#define _system_select_h
+/*
+ Unix SMB/CIFS implementation.
+
+ select system include wrappers
+
+ Copyright (C) Andrew Tridgell 2004
+
+ ** NOTE! The following LGPL license applies to the replace
+ ** library. This does NOT imply that all of Samba is released
+ ** under the LGPL
+
+ This library is free software; you can redistribute it and/or
+ modify it under the terms of the GNU Lesser General Public
+ License as published by the Free Software Foundation; either
+ version 3 of the License, or (at your option) any later version.
+
+ This library is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ Lesser General Public License for more details.
+
+ You should have received a copy of the GNU Lesser General Public
+ License along with this library; if not, see <http://www.gnu.org/licenses/>.
+
+*/
+
+#ifdef HAVE_SYS_SELECT_H
+#include <sys/select.h>
+#endif
+
+#ifdef HAVE_SYS_EPOLL_H
+#include <sys/epoll.h>
+#endif
+
+#ifdef HAVE_SOLARIS_PORTS
+#include <port.h>
+#endif
+
+#ifndef SELECT_CAST
+#define SELECT_CAST
+#endif
+
+#ifdef HAVE_POLL
+
+#include <poll.h>
+
+#else
+
+/* Type used for the number of file descriptors. */
+typedef unsigned long int nfds_t;
+
+/* Data structure describing a polling request. */
+struct pollfd
+{
+ int fd; /* File descriptor to poll. */
+ short int events; /* Types of events poller cares about. */
+ short int revents; /* Types of events that actually occurred. */
+};
+
+/* Event types that can be polled for. These bits may be set in `events'
+ to indicate the interesting event types; they will appear in `revents'
+ to indicate the status of the file descriptor. */
+#define POLLIN 0x001 /* There is data to read. */
+#define POLLPRI 0x002 /* There is urgent data to read. */
+#define POLLOUT 0x004 /* Writing now will not block. */
+#define POLLRDNORM 0x040 /* Normal data may be read. */
+#define POLLRDBAND 0x080 /* Priority data may be read. */
+#define POLLWRNORM 0x100 /* Writing now will not block. */
+#define POLLWRBAND 0x200 /* Priority data may be written. */
+#define POLLERR 0x008 /* Error condition. */
+#define POLLHUP 0x010 /* Hung up. */
+#define POLLNVAL 0x020 /* Invalid polling request. */
+
+/* define is in "replace.h" */
+int rep_poll(struct pollfd *fds, nfds_t nfds, int timeout);
+
+#endif
+
+#endif
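
The pollfd emulation means code can target poll() unconditionally. A sketch
(hypothetical helper; on systems without a native poll() the call is
expected to go through rep_poll()):

#include "replace.h"
#include "system/select.h"
#include <stdio.h>

/* Wait up to five seconds for data on stdin */
static int wait_for_stdin(void)
{
	struct pollfd pfd;
	int ret;

	pfd.fd = 0;
	pfd.events = POLLIN;
	pfd.revents = 0;

	ret = poll(&pfd, 1, 5000);	/* timeout in milliseconds */
	if (ret < 0) {
		perror("poll");
		return -1;
	}
	if (ret == 0) {
		return 0;		/* timed out */
	}
	return (pfd.revents & POLLIN) ? 1 : 0;
}
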
diff --git a/lib/replace/system/shmem.h b/lib/replace/system/shmem.h
new file mode 100644
index 0000000..64fe39b
--- /dev/null
+++ b/lib/replace/system/shmem.h
@@ -0,0 +1,59 @@
+#ifndef _system_shmem_h
+#define _system_shmem_h
+/*
+ Unix SMB/CIFS implementation.
+
+ shared memory system include wrappers
+
+ Copyright (C) Andrew Tridgell 2004
+
+ ** NOTE! The following LGPL license applies to the replace
+ ** library. This does NOT imply that all of Samba is released
+ ** under the LGPL
+
+ This library is free software; you can redistribute it and/or
+ modify it under the terms of the GNU Lesser General Public
+ License as published by the Free Software Foundation; either
+ version 3 of the License, or (at your option) any later version.
+
+ This library is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ Lesser General Public License for more details.
+
+ You should have received a copy of the GNU Lesser General Public
+ License along with this library; if not, see <http://www.gnu.org/licenses/>.
+
+*/
+
+#if defined(HAVE_SYS_IPC_H)
+#include <sys/ipc.h>
+#endif /* HAVE_SYS_IPC_H */
+
+#if defined(HAVE_SYS_SHM_H)
+#include <sys/shm.h>
+#endif /* HAVE_SYS_SHM_H */
+
+#ifdef HAVE_SYS_MMAN_H
+#include <sys/mman.h>
+#endif
+
+/* NetBSD doesn't have these */
+#ifndef SHM_R
+#define SHM_R 0400
+#endif
+
+#ifndef SHM_W
+#define SHM_W 0200
+#endif
+
+
+#ifndef MAP_FILE
+#define MAP_FILE 0
+#endif
+
+#ifndef MAP_FAILED
+#define MAP_FAILED ((void *)-1)
+#endif
+
+#endif
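
A sketch of what the MAP_FILE/MAP_FAILED fallbacks buy (hypothetical
helper; assumes <sys/mman.h> was found by configure):

#include "replace.h"
#include "system/shmem.h"

/* Map len bytes of an open file read-only, or return NULL */
static void *map_file(int fd, size_t len)
{
	/* MAP_FILE is a no-op 0 and MAP_FAILED is (void *)-1 where the
	   system headers do not define them */
	void *p = mmap(NULL, len, PROT_READ, MAP_FILE | MAP_SHARED, fd, 0);

	if (p == MAP_FAILED) {
		return NULL;
	}
	return p;
}
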
diff --git a/lib/replace/system/syslog.h b/lib/replace/system/syslog.h
new file mode 100644
index 0000000..104be1d
--- /dev/null
+++ b/lib/replace/system/syslog.h
@@ -0,0 +1,70 @@
+#ifndef _system_syslog_h
+#define _system_syslog_h
+/*
+ Unix SMB/CIFS implementation.
+
+ syslog system include wrappers
+
+ Copyright (C) Andrew Tridgell 2004
+
+ ** NOTE! The following LGPL license applies to the replace
+ ** library. This does NOT imply that all of Samba is released
+ ** under the LGPL
+
+ This library is free software; you can redistribute it and/or
+ modify it under the terms of the GNU Lesser General Public
+ License as published by the Free Software Foundation; either
+ version 3 of the License, or (at your option) any later version.
+
+ This library is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ Lesser General Public License for more details.
+
+ You should have received a copy of the GNU Lesser General Public
+ License along with this library; if not, see <http://www.gnu.org/licenses/>.
+
+*/
+
+#ifdef HAVE_SYSLOG_H
+#include <syslog.h>
+#else
+#ifdef HAVE_SYS_SYSLOG_H
+#include <sys/syslog.h>
+#endif
+#endif
+
+/* For sys_adminlog(). */
+#ifndef LOG_EMERG
+#define LOG_EMERG 0 /* system is unusable */
+#endif
+
+#ifndef LOG_ALERT
+#define LOG_ALERT 1 /* action must be taken immediately */
+#endif
+
+#ifndef LOG_CRIT
+#define LOG_CRIT 2 /* critical conditions */
+#endif
+
+#ifndef LOG_ERR
+#define LOG_ERR 3 /* error conditions */
+#endif
+
+#ifndef LOG_WARNING
+#define LOG_WARNING 4 /* warning conditions */
+#endif
+
+#ifndef LOG_NOTICE
+#define LOG_NOTICE 5 /* normal but significant condition */
+#endif
+
+#ifndef LOG_INFO
+#define LOG_INFO 6 /* informational */
+#endif
+
+#ifndef LOG_DEBUG
+#define LOG_DEBUG 7 /* debug-level messages */
+#endif
+
+#endif
diff --git a/lib/replace/system/terminal.h b/lib/replace/system/terminal.h
new file mode 100644
index 0000000..9ad601a
--- /dev/null
+++ b/lib/replace/system/terminal.h
@@ -0,0 +1,46 @@
+#ifndef _system_terminal_h
+#define _system_terminal_h
+/*
+ Unix SMB/CIFS implementation.
+
+ terminal system include wrappers
+
+ Copyright (C) Andrew Tridgell 2004
+
+ ** NOTE! The following LGPL license applies to the replace
+ ** library. This does NOT imply that all of Samba is released
+ ** under the LGPL
+
+ This library is free software; you can redistribute it and/or
+ modify it under the terms of the GNU Lesser General Public
+ License as published by the Free Software Foundation; either
+ version 3 of the License, or (at your option) any later version.
+
+ This library is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ Lesser General Public License for more details.
+
+ You should have received a copy of the GNU Lesser General Public
+ License along with this library; if not, see <http://www.gnu.org/licenses/>.
+
+*/
+
+#ifdef SUNOS4
+/* on SUNOS4 termios.h conflicts with sys/ioctl.h */
+#undef HAVE_TERMIOS_H
+#endif
+
+
+#if defined(HAVE_TERMIOS_H)
+/* POSIX terminal handling. */
+#include <termios.h>
+#elif defined(HAVE_TERMIO_H)
+/* Older SYSV terminal handling - don't use if we can avoid it. */
+#include <termio.h>
+#elif defined(HAVE_SYS_TERMIO_H)
+/* Older SYSV terminal handling - don't use if we can avoid it. */
+#include <sys/termio.h>
+#endif
+
+#endif
diff --git a/lib/replace/system/threads.h b/lib/replace/system/threads.h
new file mode 100644
index 0000000..25d3502
--- /dev/null
+++ b/lib/replace/system/threads.h
@@ -0,0 +1,48 @@
+#ifndef _system_threads_h
+#define _system_threads_h
+/*
+ Unix SMB/CIFS implementation.
+
+ macros to go along with the lib/replace/ portability layer code
+
+ Copyright (C) Volker Lendecke 2012
+
+ ** NOTE! The following LGPL license applies to the replace
+ ** library. This does NOT imply that all of Samba is released
+ ** under the LGPL
+
+ This library is free software; you can redistribute it and/or
+ modify it under the terms of the GNU Lesser General Public
+ License as published by the Free Software Foundation; either
+ version 3 of the License, or (at your option) any later version.
+
+ This library is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ Lesser General Public License for more details.
+
+ You should have received a copy of the GNU Lesser General Public
+ License along with this library; if not, see <http://www.gnu.org/licenses/>.
+*/
+
+#include <pthread.h>
+
+#if defined(HAVE_PTHREAD_MUTEXATTR_SETROBUST_NP) && \
+ !defined(HAVE_PTHREAD_MUTEXATTR_SETROBUST)
+
+#define pthread_mutexattr_setrobust pthread_mutexattr_setrobust_np
+
+/*
+ * We assume that PTHREAD_MUTEX_ROBUST_NP goes along with
+ * pthread_mutexattr_setrobust_np()
+ */
+#define PTHREAD_MUTEX_ROBUST PTHREAD_MUTEX_ROBUST_NP
+
+#endif
+
+#if defined(HAVE_PTHREAD_MUTEX_CONSISTENT_NP) && \
+ !defined(HAVE_PTHREAD_MUTEX_CONSISTENT)
+#define pthread_mutex_consistent pthread_mutex_consistent_np
+#endif
+
+#endif
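
With the *_np fallbacks in place, robust-mutex code can be written against
the POSIX names only. A sketch (hypothetical helper; assumes some form of
robust mutex support was detected):

#include "replace.h"
#include "system/threads.h"

static int init_robust_mutex(pthread_mutex_t *m)
{
	pthread_mutexattr_t attr;
	int ret;

	pthread_mutexattr_init(&attr);
	pthread_mutexattr_setpshared(&attr, PTHREAD_PROCESS_SHARED);
	/* resolves to pthread_mutexattr_setrobust_np on older glibc */
	ret = pthread_mutexattr_setrobust(&attr, PTHREAD_MUTEX_ROBUST);
	if (ret == 0) {
		ret = pthread_mutex_init(m, &attr);
	}
	pthread_mutexattr_destroy(&attr);
	return ret;
}

A later pthread_mutex_lock() that returns EOWNERDEAD would then call
pthread_mutex_consistent() before the mutex is used again.
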
diff --git a/lib/replace/system/time.h b/lib/replace/system/time.h
new file mode 100644
index 0000000..b6d2609
--- /dev/null
+++ b/lib/replace/system/time.h
@@ -0,0 +1,91 @@
+#ifndef _system_time_h
+#define _system_time_h
+/*
+ Unix SMB/CIFS implementation.
+
+ time system include wrappers
+
+ Copyright (C) Andrew Tridgell 2004
+
+ ** NOTE! The following LGPL license applies to the replace
+ ** library. This does NOT imply that all of Samba is released
+ ** under the LGPL
+
+ This library is free software; you can redistribute it and/or
+ modify it under the terms of the GNU Lesser General Public
+ License as published by the Free Software Foundation; either
+ version 3 of the License, or (at your option) any later version.
+
+ This library is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ Lesser General Public License for more details.
+
+ You should have received a copy of the GNU Lesser General Public
+ License along with this library; if not, see <http://www.gnu.org/licenses/>.
+
+*/
+
+#ifdef TIME_WITH_SYS_TIME
+#include <sys/time.h>
+#include <time.h>
+#else
+#ifdef HAVE_SYS_TIME_H
+#include <sys/time.h>
+#else
+#include <time.h>
+#endif
+#endif
+
+#ifdef HAVE_UTIME_H
+#include <utime.h>
+#else
+struct utimbuf {
+ time_t actime; /* access time */
+ time_t modtime; /* modification time */
+};
+#endif
+
+#ifndef HAVE_STRUCT_TIMESPEC
+struct timespec {
+ time_t tv_sec; /* Seconds. */
+ long tv_nsec; /* Nanoseconds. */
+};
+#endif
+
+#ifndef HAVE_MKTIME
+/* define is in "replace.h" */
+time_t rep_mktime(struct tm *t);
+#endif
+
+#ifndef HAVE_TIMEGM
+/* define is in "replace.h" */
+time_t rep_timegm(struct tm *tm);
+#endif
+
+#ifndef HAVE_UTIME
+/* define is in "replace.h" */
+int rep_utime(const char *filename, const struct utimbuf *buf);
+#endif
+
+#ifndef HAVE_UTIMES
+/* define is in "replace.h" */
+int rep_utimes(const char *filename, const struct timeval tv[2]);
+#endif
+
+#ifndef HAVE_CLOCK_GETTIME
+/* CLOCK_REALTIME is required by POSIX */
+#define CLOCK_REALTIME 0
+typedef int clockid_t;
+int rep_clock_gettime(clockid_t clk_id, struct timespec *tp);
+#endif
+/* make sure we have a best-effort CUSTOM_CLOCK_MONOTONIC we can rely on */
+#if defined(CLOCK_MONOTONIC)
+#define CUSTOM_CLOCK_MONOTONIC CLOCK_MONOTONIC
+#elif defined(CLOCK_HIGHRES)
+#define CUSTOM_CLOCK_MONOTONIC CLOCK_HIGHRES
+#else
+#define CUSTOM_CLOCK_MONOTONIC CLOCK_REALTIME
+#endif
+
+#endif
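
CUSTOM_CLOCK_MONOTONIC lets timing code ask for the best monotonic clock
without its own cascade of #ifdefs. A sketch (hypothetical helper; assumes
clock_gettime is native or mapped to rep_clock_gettime by replace.h):

#include "replace.h"
#include "system/time.h"

/* Time a callback, in seconds, using the best available clock */
static double elapsed_seconds(void (*fn)(void))
{
	struct timespec t0, t1;

	clock_gettime(CUSTOM_CLOCK_MONOTONIC, &t0);
	fn();
	clock_gettime(CUSTOM_CLOCK_MONOTONIC, &t1);

	return (double)(t1.tv_sec - t0.tv_sec) +
	       (double)(t1.tv_nsec - t0.tv_nsec) / 1e9;
}
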
diff --git a/lib/replace/system/wait.h b/lib/replace/system/wait.h
new file mode 100644
index 0000000..146c61a
--- /dev/null
+++ b/lib/replace/system/wait.h
@@ -0,0 +1,55 @@
+#ifndef _system_wait_h
+#define _system_wait_h
+/*
+ Unix SMB/CIFS implementation.
+
+ waitpid system include wrappers
+
+ Copyright (C) Andrew Tridgell 2004
+
+ ** NOTE! The following LGPL license applies to the replace
+ ** library. This does NOT imply that all of Samba is released
+ ** under the LGPL
+
+ This library is free software; you can redistribute it and/or
+ modify it under the terms of the GNU Lesser General Public
+ License as published by the Free Software Foundation; either
+ version 3 of the License, or (at your option) any later version.
+
+ This library is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ Lesser General Public License for more details.
+
+ You should have received a copy of the GNU Lesser General Public
+ License along with this library; if not, see <http://www.gnu.org/licenses/>.
+
+*/
+
+#ifdef HAVE_SYS_WAIT_H
+#include <sys/wait.h>
+#endif
+
+#include <signal.h>
+
+#ifndef SIGCLD
+#define SIGCLD SIGCHLD
+#endif
+
+#ifdef HAVE_SETJMP_H
+#include <setjmp.h>
+#endif
+
+#ifdef HAVE_SYS_UCONTEXT_H
+#include <sys/ucontext.h>
+#endif
+
+#if !defined(HAVE_SIG_ATOMIC_T_TYPE)
+typedef int sig_atomic_t;
+#endif
+
+#if !defined(HAVE_WAITPID) && defined(HAVE_WAIT4)
+int rep_waitpid(pid_t pid, int *status, int options);
+#endif
+
+#endif
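
A sketch of the caller's view (hypothetical helper; on the rare platform
with wait4() but no waitpid(), the call would resolve to the replacement
declared above):

#include "replace.h"
#include "system/wait.h"

/* Reap a child and return its exit status, or -1 */
static int reap_child(pid_t pid)
{
	int status;

	if (waitpid(pid, &status, 0) != pid) {
		return -1;
	}
	return WIFEXITED(status) ? WEXITSTATUS(status) : -1;
}
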
diff --git a/lib/replace/system/wscript_configure b/lib/replace/system/wscript_configure
new file mode 100644
index 0000000..2035474
--- /dev/null
+++ b/lib/replace/system/wscript_configure
@@ -0,0 +1,26 @@
+#!/usr/bin/env python
+
+conf.CHECK_HEADERS('sys/capability.h')
+conf.CHECK_FUNCS('getpwnam_r getpwuid_r getpwent_r')
+
+# Solaris variants of getXXent_r
+conf.CHECK_C_PROTOTYPE('getpwent_r',
+ 'struct passwd *getpwent_r(struct passwd *src, char *buf, int buflen)',
+ define='SOLARIS_GETPWENT_R', headers='pwd.h')
+conf.CHECK_C_PROTOTYPE('getgrent_r',
+ 'struct group *getgrent_r(struct group *src, char *buf, int buflen)',
+ define='SOLARIS_GETGRENT_R', headers='grp.h')
+
+# the IRIX variants (these also set the SOLARIS_* defines)
+conf.CHECK_C_PROTOTYPE('getpwent_r',
+ 'struct passwd *getpwent_r(struct passwd *src, char *buf, size_t buflen)',
+ define='SOLARIS_GETPWENT_R', headers='pwd.h')
+conf.CHECK_C_PROTOTYPE('getgrent_r',
+ 'struct group *getgrent_r(struct group *src, char *buf, size_t buflen)',
+ define='SOLARIS_GETGRENT_R', headers='grp.h')
+
+conf.CHECK_FUNCS('getgrouplist')
+conf.CHECK_HEADERS('ctype.h locale.h langinfo.h')
+conf.CHECK_HEADERS('fnmatch.h locale.h langinfo.h')
+conf.CHECK_HEADERS('sys/ipc.h sys/mman.h sys/shm.h')
+conf.CHECK_HEADERS('termios.h termio.h sys/termio.h')
diff --git a/lib/replace/test/getifaddrs.c b/lib/replace/test/getifaddrs.c
new file mode 100644
index 0000000..8d575af
--- /dev/null
+++ b/lib/replace/test/getifaddrs.c
@@ -0,0 +1,105 @@
+/*
+ * Unix SMB/CIFS implementation.
+ *
+ * libreplace getifaddrs test
+ *
+ * Copyright (C) Michael Adam <obnox@samba.org> 2008
+ *
+ * ** NOTE! The following LGPL license applies to the replace
+ * ** library. This does NOT imply that all of Samba is released
+ * ** under the LGPL
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 3 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, see <http://www.gnu.org/licenses/>.
+ */
+
+#ifndef AUTOCONF_TEST
+#include "replace.h"
+#include "system/network.h"
+#include "replace-test.h"
+#endif
+
+#ifdef HAVE_INET_NTOP
+#define rep_inet_ntop inet_ntop
+#endif
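+
+/* When the system provides inet_ntop(3) the test calls it directly;
+ * otherwise the rep_inet_ntop fallback from libreplace is expected to
+ * be linked in. */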
+
+static const char *format_sockaddr(struct sockaddr *addr,
+ char *addrstring,
+ socklen_t addrlen)
+{
+ const char *result = NULL;
+
+ if (addr->sa_family == AF_INET) {
+ result = rep_inet_ntop(AF_INET,
+ &((struct sockaddr_in *)addr)->sin_addr,
+ addrstring,
+ addrlen);
+#ifdef HAVE_STRUCT_SOCKADDR_IN6
+ } else if (addr->sa_family == AF_INET6) {
+ result = rep_inet_ntop(AF_INET6,
+ &((struct sockaddr_in6 *)addr)->sin6_addr,
+ addrstring,
+ addrlen);
+#endif
+ }
+ return result;
+}
+
+int getifaddrs_test(void)
+{
+ struct ifaddrs *ifs = NULL;
+ struct ifaddrs *ifs_head = NULL;
+ int ret;
+
+ ret = getifaddrs(&ifs);
+ ifs_head = ifs;
+ if (ret != 0) {
+ fprintf(stderr, "getifaddrs() failed: %s\n", strerror(errno));
+ return 1;
+ }
+
+ while (ifs) {
+ printf("%-10s ", ifs->ifa_name);
+ if (ifs->ifa_addr != NULL) {
+ char addrstring[INET6_ADDRSTRLEN];
+ const char *result;
+
+ result = format_sockaddr(ifs->ifa_addr,
+ addrstring,
+ sizeof(addrstring));
+ if (result != NULL) {
+ printf("IP=%s ", addrstring);
+ }
+
+ if (ifs->ifa_netmask != NULL) {
+ result = format_sockaddr(ifs->ifa_netmask,
+ addrstring,
+ sizeof(addrstring));
+ if (result != NULL) {
+ printf("NETMASK=%s", addrstring);
+ }
+ } else {
+ printf("AF=%d ", ifs->ifa_addr->sa_family);
+ }
+ } else {
+ printf("<no address>");
+ }
+
+ printf("\n");
+ ifs = ifs->ifa_next;
+ }
+
+ freeifaddrs(ifs_head);
+
+ return 0;
+}
diff --git a/lib/replace/test/incoherent_mmap.c b/lib/replace/test/incoherent_mmap.c
new file mode 100644
index 0000000..ee288fd
--- /dev/null
+++ b/lib/replace/test/incoherent_mmap.c
@@ -0,0 +1,83 @@
+/* On OpenBSD, if one process write(2)s to a file, another process does
+ * not see the change through its existing mmap of that file.  This
+ * program exits with status 0 if that incoherence is present, 1 if the
+ * mapping is coherent, and 2 if something else goes wrong. */
+#include <err.h>
+#include <sys/mman.h>
+#include <unistd.h>
+#include <sys/types.h>
+#include <sys/stat.h>
+#include <fcntl.h>
+#include <stdlib.h>
+#include <string.h>
+#include <errno.h>
+
+#define DATA "coherent.mmap"
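+
+/* Rough protocol between the two processes (synchronised over a pair
+ * of pipes):
+ *
+ *   parent: write a page of 0x42, mmap it, then ping the child
+ *   child:  pwrite(2) a zero byte over the first byte, then pong back
+ *   parent: re-read the mapping; if it still shows 0x42 the mapping is
+ *           incoherent with write(2) (exit 0), otherwise coherent (exit 1)
+ */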
+
+int main(int argc, char *argv[])
+{
+ int tochild[2], toparent[2];
+ int fd;
+ volatile unsigned char *map;
+ unsigned char *page;
+ const char *fname = argv[1];
+ char c = 0;
+
+ if (pipe(tochild) != 0 || pipe(toparent) != 0)
+ err(2, "Creating pipe");
+
+ if (!fname)
+ fname = DATA;
+
+ fd = open(fname, O_RDWR|O_CREAT|O_TRUNC, 0600);
+ if (fd < 0)
+ err(2, "opening %s", fname);
+ unlink(fname);
+
+ switch (fork()) {
+ case -1:
+ err(2, "Fork");
+ case 0:
+ close(tochild[1]);
+ close(toparent[0]);
+
+ /* Wait for parent to create file. */
+ if (read(tochild[0], &c, 1) != 1)
+ err(2, "reading from parent");
+
+ /* Alter first byte. */
+ pwrite(fd, &c, 1, 0);
+
+ if (write(toparent[1], &c, 1) != 1)
+ err(2, "writing to parent");
+ exit(0);
+
+ default:
+ close(tochild[0]);
+ close(toparent[1]);
+
+ /* Create a file and mmap it. */
+ page = malloc(getpagesize());
+ memset(page, 0x42, getpagesize());
+ if (write(fd, page, getpagesize()) != getpagesize())
+ err(2, "writing first page");
+ map = mmap(NULL, getpagesize(), PROT_READ|PROT_WRITE,
+ MAP_SHARED, fd, 0);
+ if (map == MAP_FAILED)
+ err(2, "mapping file");
+
+ if (*map != 0x42)
+ errx(2, "first byte isn't 0x42!");
+
+ /* Tell child to alter file. */
+ if (write(tochild[1], &c, 1) != 1)
+ err(2, "writing to child");
+
+ if (read(toparent[0], &c, 1) != 1)
+ err(2, "reading from child");
+
+ if (*map)
+ errx(0, "mmap incoherent: first byte isn't 0.");
+
+ exit(1);
+ }
+}
diff --git a/lib/replace/test/main.c b/lib/replace/test/main.c
new file mode 100644
index 0000000..94264d7
--- /dev/null
+++ b/lib/replace/test/main.c
@@ -0,0 +1,35 @@
+/*
+ Unix SMB/CIFS implementation.
+
+ libreplace tests
+
+ Copyright (C) Jelmer Vernooij 2006
+
+ ** NOTE! The following LGPL license applies to the replace
+ ** library. This does NOT imply that all of Samba is released
+ ** under the LGPL
+
+ This library is free software; you can redistribute it and/or
+ modify it under the terms of the GNU Lesser General Public
+ License as published by the Free Software Foundation; either
+ version 3 of the License, or (at your option) any later version.
+
+ This library is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ Lesser General Public License for more details.
+
+ You should have received a copy of the GNU Lesser General Public
+ License along with this library; if not, see <http://www.gnu.org/licenses/>.
+*/
+
+#include "replace.h"
+#include "replace-testsuite.h"
+
+int main(void)
+{
+ bool ret = torture_local_replace(NULL);
+ if (ret)
+ return 0;
+ return -1;
+}
diff --git a/lib/replace/test/os2_delete.c b/lib/replace/test/os2_delete.c
new file mode 100644
index 0000000..0816f61
--- /dev/null
+++ b/lib/replace/test/os2_delete.c
@@ -0,0 +1,134 @@
+/*
+ test readdir/unlink pattern that OS/2 uses
+ tridge@samba.org July 2005
+*/
+
+#include <stdio.h>
+#include <stdlib.h>
+#include <sys/stat.h>
+#include <unistd.h>
+#include <sys/types.h>
+#include <dirent.h>
+#include <errno.h>
+#include <string.h>
+#include <fcntl.h>
+#include "replace-test.h"
+
+#define NUM_FILES 700
+#define READDIR_SIZE 100
+#define DELETE_SIZE 4
+
+#define TESTDIR "test.dir"
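+
+/* The pattern under test: read up to READDIR_SIZE entries while
+ * remembering their telldir() offsets, unlink the first DELETE_SIZE of
+ * them, seekdir() back to just past the deleted batch, and repeat until
+ * the directory is empty.  A broken readdir/seekdir combination skips
+ * or repeats entries after the unlinks. */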
+
+static int test_readdir_os2_delete_ret;
+
+#define FAILED(d) (printf("failure: readdir [\nFailed for %s - %d = %s\n]\n", d, errno, strerror(errno)), test_readdir_os2_delete_ret = 1)
+
+#ifndef MIN
+#define MIN(a,b) ((a)<(b)?(a):(b))
+#endif
+
+#ifdef _WIN32
+#define mkdir(d,m) _mkdir(d)
+#endif
+
+static void cleanup(void)
+{
+ /* I'm a lazy bastard */
+ if (system("rm -rf " TESTDIR)) {
+ FAILED("system");
+ }
+ mkdir(TESTDIR, 0700) == 0 || FAILED("mkdir");
+}
+
+static void create_files(void)
+{
+ int i;
+ for (i=0;i<NUM_FILES;i++) {
+ char fname[40];
+ int fd;
+ snprintf(fname, sizeof(fname), TESTDIR "/test%u.txt", i);
+ fd = open(fname, O_CREAT|O_RDWR, 0600);
+ if (fd < 0) {
+ FAILED("open");
+ }
+ if (close(fd) != 0) {
+ FAILED("close");
+ }
+ }
+}
+
+static int os2_delete(DIR *d)
+{
+ off_t offsets[READDIR_SIZE];
+ int i, j;
+ struct dirent *de;
+ char names[READDIR_SIZE][30];
+
+ /* scan, remembering offsets */
+ for (i=0, de=readdir(d);
+ de && i < READDIR_SIZE;
+ de=readdir(d), i++) {
+ offsets[i] = telldir(d);
+ /* strlcpy not available here */
+ snprintf(names[i], sizeof(names[i]), "%s", de->d_name);
+ }
+
+ if (i == 0) {
+ return 0;
+ }
+
+ /* delete the first few */
+ for (j=0; j<MIN(i, DELETE_SIZE); j++) {
+ char fname[40];
+ snprintf(fname, sizeof(fname), TESTDIR "/%s", names[j]);
+ unlink(fname) == 0 || FAILED("unlink");
+ }
+
+ /* seek to just after the deletion */
+ seekdir(d, offsets[j-1]);
+
+ /* return number deleted */
+ return j;
+}
+
+int test_readdir_os2_delete(void)
+{
+ int total_deleted = 0;
+ DIR *d;
+ struct dirent *de;
+
+ test_readdir_os2_delete_ret = 0;
+
+ cleanup();
+ create_files();
+
+ d = opendir(TESTDIR "/test0.txt");
+	if (d != NULL) FAILED("opendir() on file succeeded");
+ if (errno != ENOTDIR) FAILED("opendir() on file didn't give ENOTDIR");
+
+ d = opendir(TESTDIR);
+
+ /* skip past . and .. */
+ de = readdir(d);
+ strcmp(de->d_name, ".") == 0 || FAILED("match .");
+ de = readdir(d);
+ strcmp(de->d_name, "..") == 0 || FAILED("match ..");
+
+ while (1) {
+ int n = os2_delete(d);
+ if (n == 0) break;
+ total_deleted += n;
+ }
+ closedir(d);
+
+ fprintf(stderr, "Deleted %d files of %d\n", total_deleted, NUM_FILES);
+
+ rmdir(TESTDIR) == 0 || FAILED("rmdir");
+
+ if (system("rm -rf " TESTDIR) == -1) {
+ FAILED("system");
+ }
+
+ return test_readdir_os2_delete_ret;
+}
diff --git a/lib/replace/test/shared_mmap.c b/lib/replace/test/shared_mmap.c
new file mode 100644
index 0000000..50dad8d
--- /dev/null
+++ b/lib/replace/test/shared_mmap.c
@@ -0,0 +1,68 @@
+/* this tests whether we can use a shared writeable mmap on a file -
+ as needed for the mmap variant of FAST_SHARE_MODES */
+
+#if defined(HAVE_UNISTD_H)
+#include <unistd.h>
+#endif
+#include <sys/mman.h>
+#include <sys/types.h>
+#include <sys/stat.h>
+#include <fcntl.h>
+
+#define DATA "conftest.mmap"
+
+#ifndef MAP_FILE
+#define MAP_FILE 0
+#endif
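+
+/* Handshake through the shared mapping: the parent stores 55732 at
+ * buf[9124]; the child polls for it through its own mapping of the same
+ * file and answers by storing 7268 at buf[1763].  Either side gives up
+ * after roughly seven seconds of polling. */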
+
+int main(void)
+{
+ int *buf;
+ int i;
+ int fd = open(DATA,O_RDWR|O_CREAT|O_TRUNC,0666);
+ int count=7;
+
+ if (fd == -1) exit(1);
+
+ for (i=0;i<10000;i++) {
+ write(fd,&i,sizeof(i));
+ }
+
+ close(fd);
+
+ if (fork() == 0) {
+ fd = open(DATA,O_RDWR);
+ if (fd == -1) exit(1);
+
+ buf = (int *)mmap(NULL, 10000*sizeof(int),
+ (PROT_READ | PROT_WRITE),
+ MAP_FILE | MAP_SHARED,
+ fd, 0);
+
+ while (count-- && buf[9124] != 55732) sleep(1);
+
+ if (count <= 0) exit(1);
+
+ buf[1763] = 7268;
+ exit(0);
+ }
+
+ fd = open(DATA,O_RDWR);
+ if (fd == -1) exit(1);
+
+ buf = (int *)mmap(NULL, 10000*sizeof(int),
+ (PROT_READ | PROT_WRITE),
+ MAP_FILE | MAP_SHARED,
+ fd, 0);
+
+ if (buf == (int *)-1) exit(1);
+
+ buf[9124] = 55732;
+
+ while (count-- && buf[1763] != 7268) sleep(1);
+
+ unlink(DATA);
+
+ if (count > 0) exit(0);
+ exit(1);
+}
diff --git a/lib/replace/test/shared_mremap.c b/lib/replace/test/shared_mremap.c
new file mode 100644
index 0000000..05032ad
--- /dev/null
+++ b/lib/replace/test/shared_mremap.c
@@ -0,0 +1,48 @@
+/* this tests whether we can use mremap */
+
+#if defined(HAVE_UNISTD_H)
+#include <unistd.h>
+#endif
+#include <sys/mman.h>
+#include <sys/types.h>
+#include <sys/stat.h>
+#include <fcntl.h>
+
+#define DATA "conftest.mmap"
+
+#ifndef MAP_FILE
+#define MAP_FILE 0
+#endif
+
+#ifndef MAP_FAILED
+#define MAP_FAILED (int *)-1
+#endif
+
+int main(void)
+{
+ int *buf;
+ int fd;
+ int err = 1;
+
+ fd = open(DATA, O_RDWR|O_CREAT|O_TRUNC, 0666);
+ if (fd == -1) {
+ exit(1);
+ }
+
+ buf = (int *)mmap(NULL, 0x1000, PROT_READ | PROT_WRITE,
+ MAP_FILE | MAP_SHARED, fd, 0);
+ if (buf == MAP_FAILED) {
+ goto done;
+ }
+
+ buf = mremap(buf, 0x1000, 0x2000, MREMAP_MAYMOVE);
+ if (buf == MAP_FAILED) {
+ goto done;
+ }
+
+ err = 0;
+done:
+ close(fd);
+ unlink(DATA);
+ exit(err);
+}
diff --git a/lib/replace/test/snprintf.c b/lib/replace/test/snprintf.c
new file mode 100644
index 0000000..d06630b
--- /dev/null
+++ b/lib/replace/test/snprintf.c
@@ -0,0 +1,29 @@
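+/* Configure-time probe for a C99-conforming [v]snprintf: the return
+ * value must be the number of characters that would have been written
+ * (5 for "hello" even with a zero-sized or NULL buffer), and %lld, %zu
+ * and positional %n$ arguments must behave. */
+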
+#include <stdarg.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+
+void foo(const char *format, ...)
+{
+ va_list ap;
+ int len;
+ char buf[20];
+ long long l = 1234567890;
+ l *= 100;
+
+ va_start(ap, format);
+ len = vsnprintf(buf, 0, format, ap);
+ va_end(ap);
+ if (len != 5) exit(1);
+
+ va_start(ap, format);
+ len = vsnprintf(0, 0, format, ap);
+ va_end(ap);
+ if (len != 5) exit(2);
+
+ if (snprintf(buf, 3, "hello") != 5 || strcmp(buf, "he") != 0) exit(3);
+
+ if (snprintf(buf, 20, "%lld", l) != 12 || strcmp(buf, "123456789000") != 0) exit(4);
+ if (snprintf(buf, 20, "%zu", 123456789) != 9 || strcmp(buf, "123456789") != 0) exit(5);
+ if (snprintf(buf, 20, "%2\$d %1\$d", 3, 4) != 3 || strcmp(buf, "4 3") != 0) exit(6);
+ if (snprintf(buf, 20, "%s", 0) < 3) exit(7);
+
+ printf("1");
+ exit(0);
+}
+int main(void) { foo("hello"); return 0; }
diff --git a/lib/replace/test/strptime.c b/lib/replace/test/strptime.c
new file mode 100644
index 0000000..5bf03f5
--- /dev/null
+++ b/lib/replace/test/strptime.c
@@ -0,0 +1,173 @@
+
+#ifdef LIBREPLACE_CONFIGURE_TEST_STRPTIME
+
+#include <stdio.h>
+#include <stdlib.h>
+#include <time.h>
+
+#define true 1
+#define false 0
+
+#ifndef __STRING
+#define __STRING(x) #x
+#endif
+
+/* make printf a no-op */
+#define printf if(0) printf
+
+#else /* LIBREPLACE_CONFIGURE_TEST_STRPTIME */
+
+#include "replace.h"
+#include "system/time.h"
+#include "replace-test.h"
+
+#endif /* LIBREPLACE_CONFIGURE_TEST_STRPTIME */
+
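+/* The probe parses "20070414101546Z" twice -- once stopping at the
+ * trailing 'Z' and once consuming it via a literal 'Z' in the format --
+ * and requires both calls to fill struct tm identically with
+ * 2007-04-14 10:15:46 (a Saturday, day-of-year 103). */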
+int libreplace_test_strptime(void)
+{
+ const char *s = "20070414101546Z";
+ char *ret;
+ struct tm t, t2;
+
+ memset(&t, 0, sizeof(t));
+ memset(&t2, 0, sizeof(t2));
+
+ printf("test: strptime\n");
+
+ ret = strptime(s, "%Y%m%d%H%M%S", &t);
+ if ( ret == NULL ) {
+ printf("failure: strptime [\n"
+ "returned NULL\n"
+ "]\n");
+ return false;
+ }
+
+ if ( *ret != 'Z' ) {
+ printf("failure: strptime [\n"
+ "ret doesn't point to 'Z'\n"
+ "]\n");
+ return false;
+ }
+
+ ret = strptime(s, "%Y%m%d%H%M%SZ", &t2);
+ if ( ret == NULL ) {
+ printf("failure: strptime [\n"
+ "returned NULL with Z\n"
+ "]\n");
+ return false;
+ }
+
+ if ( *ret != '\0' ) {
+ printf("failure: strptime [\n"
+ "ret doesn't point to '\\0'\n"
+ "]\n");
+ return false;
+ }
+
+#define CMP_TM_ELEMENT(t1,t2,elem) \
+ if (t1.elem != t2.elem) { \
+ printf("failure: strptime [\n" \
+ "result differs if the format string has a 'Z' at the end\n" \
+ "element: %s %d != %d\n" \
+ "]\n", \
+		__STRING(elem), t1.elem, t2.elem); \
+ return false; \
+ }
+
+ CMP_TM_ELEMENT(t,t2,tm_sec);
+ CMP_TM_ELEMENT(t,t2,tm_min);
+ CMP_TM_ELEMENT(t,t2,tm_hour);
+ CMP_TM_ELEMENT(t,t2,tm_mday);
+ CMP_TM_ELEMENT(t,t2,tm_mon);
+ CMP_TM_ELEMENT(t,t2,tm_year);
+ CMP_TM_ELEMENT(t,t2,tm_wday);
+ CMP_TM_ELEMENT(t,t2,tm_yday);
+ CMP_TM_ELEMENT(t,t2,tm_isdst);
+
+ if (t.tm_sec != 46) {
+ printf("failure: strptime [\n"
+ "tm_sec: expected: 46, got: %d\n"
+ "]\n",
+ t.tm_sec);
+ return false;
+ }
+
+ if (t.tm_min != 15) {
+ printf("failure: strptime [\n"
+ "tm_min: expected: 15, got: %d\n"
+ "]\n",
+ t.tm_min);
+ return false;
+ }
+
+ if (t.tm_hour != 10) {
+ printf("failure: strptime [\n"
+ "tm_hour: expected: 10, got: %d\n"
+ "]\n",
+ t.tm_hour);
+ return false;
+ }
+
+ if (t.tm_mday != 14) {
+ printf("failure: strptime [\n"
+ "tm_mday: expected: 14, got: %d\n"
+ "]\n",
+ t.tm_mday);
+ return false;
+ }
+
+ if (t.tm_mon != 3) {
+ printf("failure: strptime [\n"
+ "tm_mon: expected: 3, got: %d\n"
+ "]\n",
+ t.tm_mon);
+ return false;
+ }
+
+ if (t.tm_year != 107) {
+ printf("failure: strptime [\n"
+ "tm_year: expected: 107, got: %d\n"
+ "]\n",
+ t.tm_year);
+ return false;
+ }
+
+ if (t.tm_wday != 6) { /* saturday */
+ printf("failure: strptime [\n"
+ "tm_wday: expected: 6, got: %d\n"
+ "]\n",
+ t.tm_wday);
+ return false;
+ }
+
+ if (t.tm_yday != 103) {
+ printf("failure: strptime [\n"
+ "tm_yday: expected: 103, got: %d\n"
+ "]\n",
+ t.tm_yday);
+ return false;
+ }
+
+ /* we don't test this as it depends on the host configuration
+ if (t.tm_isdst != 0) {
+ printf("failure: strptime [\n"
+ "tm_isdst: expected: 0, got: %d\n"
+ "]\n",
+ t.tm_isdst);
+ return false;
+ }*/
+
+ printf("success: strptime\n");
+
+ return true;
+}
+
+#ifdef LIBREPLACE_CONFIGURE_TEST_STRPTIME
+int main (void)
+{
+ int ret;
+ ret = libreplace_test_strptime();
+ if (ret == false) return 1;
+ return 0;
+}
+#endif
diff --git a/lib/replace/test/testsuite.c b/lib/replace/test/testsuite.c
new file mode 100644
index 0000000..017b8ed
--- /dev/null
+++ b/lib/replace/test/testsuite.c
@@ -0,0 +1,1114 @@
+/*
+ Unix SMB/CIFS implementation.
+
+ libreplace tests
+
+ Copyright (C) Jelmer Vernooij 2006
+
+ ** NOTE! The following LGPL license applies to the replace
+ ** library. This does NOT imply that all of Samba is released
+ ** under the LGPL
+
+ This library is free software; you can redistribute it and/or
+ modify it under the terms of the GNU Lesser General Public
+ License as published by the Free Software Foundation; either
+ version 3 of the License, or (at your option) any later version.
+
+ This library is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ Lesser General Public License for more details.
+
+ You should have received a copy of the GNU Lesser General Public
+ License along with this library; if not, see <http://www.gnu.org/licenses/>.
+*/
+
+#include "replace.h"
+#include "replace-test.h"
+#include "replace-testsuite.h"
+
+/*
+ we include all the system/ include files here so that libreplace tests
+ them in the build farm
+*/
+#include "system/capability.h"
+#include "system/dir.h"
+#include "system/filesys.h"
+#include "system/glob.h"
+#include "system/iconv.h"
+#include "system/locale.h"
+#include "system/network.h"
+#include "system/passwd.h"
+#include "system/readline.h"
+#include "system/select.h"
+#include "system/shmem.h"
+#include "system/syslog.h"
+#include "system/terminal.h"
+#include "system/time.h"
+#include "system/wait.h"
+#include "system/aio.h"
+
+#define TESTFILE "testfile.dat"
+
+
+/*
+ test ftruncate() function
+ */
+static int test_ftruncate(void)
+{
+ struct stat st;
+ int fd;
+ const int size = 1234;
+ printf("test: ftruncate\n");
+ unlink(TESTFILE);
+ fd = open(TESTFILE, O_RDWR|O_CREAT, 0600);
+ if (fd == -1) {
+ printf("failure: ftruncate [\n"
+ "creating '%s' failed - %s\n]\n", TESTFILE, strerror(errno));
+ return false;
+ }
+ if (ftruncate(fd, size) != 0) {
+ printf("failure: ftruncate [\n%s\n]\n", strerror(errno));
+ return false;
+ }
+ if (fstat(fd, &st) != 0) {
+ printf("failure: ftruncate [\nfstat failed - %s\n]\n", strerror(errno));
+ return false;
+ }
+ if (st.st_size != size) {
+ printf("failure: ftruncate [\ngave wrong size %d - expected %d\n]\n",
+ (int)st.st_size, size);
+ return false;
+ }
+ unlink(TESTFILE);
+ printf("success: ftruncate\n");
+ return true;
+}
+
+/*
+ test strlcpy() function.
+ see http://www.gratisoft.us/todd/papers/strlcpy.html
+ */
+static int test_strlcpy(void)
+{
+ char buf[4];
+ const struct {
+ const char *src;
+ size_t result;
+ } tests[] = {
+ { "abc", 3 },
+ { "abcdef", 6 },
+ { "abcd", 4 },
+ { "", 0 },
+ { NULL, 0 }
+ };
+ int i;
+ printf("test: strlcpy\n");
+ for (i=0;tests[i].src;i++) {
+ if (strlcpy(buf, tests[i].src, sizeof(buf)) != tests[i].result) {
+ printf("failure: strlcpy [\ntest %d failed\n]\n", i);
+ return false;
+ }
+ }
+ printf("success: strlcpy\n");
+ return true;
+}
+
+static int test_strlcat(void)
+{
+ char tmp[10];
+ printf("test: strlcat\n");
+ strlcpy(tmp, "", sizeof(tmp));
+ if (strlcat(tmp, "bla", 3) != 3) {
+ printf("failure: strlcat [\ninvalid return code\n]\n");
+ return false;
+ }
+ if (strcmp(tmp, "bl") != 0) {
+ printf("failure: strlcat [\nexpected \"bl\", got \"%s\"\n]\n",
+ tmp);
+ return false;
+ }
+
+ strlcpy(tmp, "da", sizeof(tmp));
+	if (strlcat(tmp, "me", 4) != 4) {
+		printf("failure: strlcat [\ninvalid return code\n]\n");
+		return false;
+	}
+	if (strcmp(tmp, "dam") != 0) {
+		printf("failure: strlcat [\nexpected \"dam\", got \"%s\"\n]\n",
+			tmp);
+		return false;
+	}
+
+ printf("success: strlcat\n");
+ return true;
+}
+
+static int test_mktime(void)
+{
+ /* FIXME */
+ return true;
+}
+
+static int test_initgroups(void)
+{
+ /* FIXME */
+ return true;
+}
+
+static int test_memmove(void)
+{
+ /* FIXME */
+ return true;
+}
+
+static int test_strdup(void)
+{
+ char *x;
+ printf("test: strdup\n");
+ x = strdup("bla");
+ if (strcmp("bla", x) != 0) {
+ printf("failure: strdup [\nfailed: expected \"bla\", got \"%s\"\n]\n",
+ x);
+ return false;
+ }
+ free(x);
+ printf("success: strdup\n");
+ return true;
+}
+
+static int test_setlinebuf(void)
+{
+ printf("test: setlinebuf\n");
+ setlinebuf(stdout);
+ printf("success: setlinebuf\n");
+ return true;
+}
+
+static int test_vsyslog(void)
+{
+ /* FIXME */
+ return true;
+}
+
+static int test_timegm(void)
+{
+ /* FIXME */
+ return true;
+}
+
+static int test_setenv(void)
+{
+#define TEST_SETENV(key, value, overwrite, result) do { \
+ int _ret; \
+ char *_v; \
+ _ret = setenv(key, value, overwrite); \
+ if (_ret != 0) { \
+ printf("failure: setenv [\n" \
+ "setenv(%s, %s, %d) failed\n" \
+ "]\n", \
+ key, value, overwrite); \
+ return false; \
+ } \
+ _v=getenv(key); \
+ if (!_v) { \
+ printf("failure: setenv [\n" \
+ "getenv(%s) returned NULL\n" \
+ "]\n", \
+ key); \
+ return false; \
+ } \
+ if (strcmp(result, _v) != 0) { \
+ printf("failure: setenv [\n" \
+ "getenv(%s): '%s' != '%s'\n" \
+ "]\n", \
+ key, result, _v); \
+ return false; \
+ } \
+} while(0)
+
+#define TEST_UNSETENV(key) do { \
+ char *_v; \
+ unsetenv(key); \
+ _v=getenv(key); \
+ if (_v) { \
+ printf("failure: setenv [\n" \
+ "getenv(%s): NULL != '%s'\n" \
+ "]\n", \
+			key, _v); \
+ return false; \
+ } \
+} while (0)
+
+#define SETENVTEST_KEY "SETENVTESTKEY"
+#define SETENVTEST_VAL "SETENVTESTVAL"
+
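+	/* The sequence below exercises both overwrite modes: with
+	 * overwrite=0 an existing value must survive a second setenv(),
+	 * with overwrite=1 it must be replaced, and unsetenv() must be
+	 * idempotent. */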
+ printf("test: setenv\n");
+ TEST_SETENV(SETENVTEST_KEY, SETENVTEST_VAL"1", 0, SETENVTEST_VAL"1");
+ TEST_SETENV(SETENVTEST_KEY, SETENVTEST_VAL"2", 0, SETENVTEST_VAL"1");
+ TEST_SETENV(SETENVTEST_KEY, SETENVTEST_VAL"3", 1, SETENVTEST_VAL"3");
+ TEST_SETENV(SETENVTEST_KEY, SETENVTEST_VAL"4", 1, SETENVTEST_VAL"4");
+ TEST_UNSETENV(SETENVTEST_KEY);
+ TEST_UNSETENV(SETENVTEST_KEY);
+ TEST_SETENV(SETENVTEST_KEY, SETENVTEST_VAL"5", 0, SETENVTEST_VAL"5");
+ TEST_UNSETENV(SETENVTEST_KEY);
+ TEST_UNSETENV(SETENVTEST_KEY);
+ printf("success: setenv\n");
+ return true;
+}
+
+static int test_strndup(void)
+{
+ char *x;
+ printf("test: strndup\n");
+ x = strndup("bla", 0);
+ if (strcmp(x, "") != 0) {
+ printf("failure: strndup [\ninvalid\n]\n");
+ return false;
+ }
+ free(x);
+ x = strndup("bla", 2);
+ if (strcmp(x, "bl") != 0) {
+ printf("failure: strndup [\ninvalid\n]\n");
+ return false;
+ }
+ free(x);
+ x = strndup("bla", 10);
+ if (strcmp(x, "bla") != 0) {
+ printf("failure: strndup [\ninvalid\n]\n");
+ free(x);
+ return false;
+ }
+ free(x);
+ printf("success: strndup\n");
+ return true;
+}
+
+static int test_strnlen(void)
+{
+ printf("test: strnlen\n");
+ if (strnlen("bla", 2) != 2) {
+ printf("failure: strnlen [\nunexpected length\n]\n");
+ return false;
+ }
+
+ if (strnlen("some text\n", 0) != 0) {
+ printf("failure: strnlen [\nunexpected length\n]\n");
+ return false;
+ }
+
+ if (strnlen("some text", 20) != 9) {
+ printf("failure: strnlen [\nunexpected length\n]\n");
+ return false;
+ }
+
+ printf("success: strnlen\n");
+ return true;
+}
+
+static int test_waitpid(void)
+{
+ /* FIXME */
+ return true;
+}
+
+static int test_seteuid(void)
+{
+ /* FIXME */
+ return true;
+}
+
+static int test_setegid(void)
+{
+ /* FIXME */
+ return true;
+}
+
+static int test_asprintf(void)
+{
+ char *x;
+ printf("test: asprintf\n");
+ if (asprintf(&x, "%d", 9) != 1) {
+ printf("failure: asprintf [\ngenerate asprintf\n]\n");
+ return false;
+ }
+ if (strcmp(x, "9") != 0) {
+ printf("failure: asprintf [\ngenerate asprintf\n]\n");
+ return false;
+ }
+ if (asprintf(&x, "dat%s", "a") != 4) {
+ printf("failure: asprintf [\ngenerate asprintf\n]\n");
+ return false;
+ }
+ if (strcmp(x, "data") != 0) {
+ printf("failure: asprintf [\ngenerate asprintf\n]\n");
+ return false;
+ }
+ printf("success: asprintf\n");
+ return true;
+}
+
+static int test_snprintf(void)
+{
+ char tmp[10];
+ printf("test: snprintf\n");
+ if (snprintf(tmp, 3, "foo%d", 9) != 4) {
+ printf("failure: snprintf [\nsnprintf return code failed\n]\n");
+ return false;
+ }
+
+ if (strcmp(tmp, "fo") != 0) {
+ printf("failure: snprintf [\nsnprintf failed\n]\n");
+ return false;
+ }
+
+ printf("success: snprintf\n");
+ return true;
+}
+
+static int test_vasprintf(void)
+{
+ /* FIXME */
+ return true;
+}
+
+static int test_vsnprintf(void)
+{
+ /* FIXME */
+ return true;
+}
+
+static int test_opendir(void)
+{
+ /* FIXME */
+ return true;
+}
+
+static int test_readdir(void)
+{
+ printf("test: readdir\n");
+ if (test_readdir_os2_delete() != 0) {
+ return false;
+ }
+ printf("success: readdir\n");
+ return true;
+}
+
+static int test_telldir(void)
+{
+ /* FIXME */
+ return true;
+}
+
+static int test_seekdir(void)
+{
+ /* FIXME */
+ return true;
+}
+
+static int test_dlopen(void)
+{
+ /* FIXME: test dlopen, dlsym, dlclose, dlerror */
+ return true;
+}
+
+
+static int test_chroot(void)
+{
+ /* FIXME: chroot() */
+ return true;
+}
+
+static int test_bzero(void)
+{
+ /* FIXME: bzero */
+ return true;
+}
+
+static int test_strerror(void)
+{
+ /* FIXME */
+ return true;
+}
+
+static int test_errno(void)
+{
+ printf("test: errno\n");
+ errno = 3;
+ if (errno != 3) {
+ printf("failure: errno [\nerrno failed\n]\n");
+ return false;
+ }
+
+ printf("success: errno\n");
+ return true;
+}
+
+static int test_mkdtemp(void)
+{
+ /* FIXME */
+ return true;
+}
+
+static int test_mkstemp(void)
+{
+ /* FIXME */
+ return true;
+}
+
+static int test_pread(void)
+{
+ /* FIXME */
+ return true;
+}
+
+static int test_pwrite(void)
+{
+ /* FIXME */
+ return true;
+}
+
+static int test_inet_ntoa(void)
+{
+ /* FIXME */
+ return true;
+}
+
+#define TEST_STRTO_X(type,fmt,func,str,base,res,diff,rrnoo) do {\
+ type _v; \
+ char _s[64]; \
+ char *_p = NULL;\
+ char *_ep = NULL; \
+ strlcpy(_s, str, sizeof(_s));\
+ if (diff >= 0) { \
+ _ep = &_s[diff]; \
+ } \
+ errno = 0; \
+ _v = func(_s, &_p, base); \
+ if (errno != rrnoo) { \
+ printf("failure: %s [\n" \
+ "\t%s\n" \
+ "\t%s(\"%s\",%d,%d): " fmt " (=/!)= " fmt "\n" \
+ "\terrno: %d != %d\n" \
+ "]\n", \
+ __STRING(func), __location__, __STRING(func), \
+ str, diff, base, res, _v, rrnoo, errno); \
+ return false; \
+ } else if (_v != res) { \
+ printf("failure: %s [\n" \
+ "\t%s\n" \
+ "\t%s(\"%s\",%d,%d): " fmt " != " fmt "\n" \
+ "]\n", \
+ __STRING(func), __location__, __STRING(func), \
+ str, diff, base, res, _v); \
+ return false; \
+ } else if (_p != _ep) { \
+ printf("failure: %s [\n" \
+ "\t%s\n" \
+ "\t%s(\"%s\",%d,%d): " fmt " (=/!)= " fmt "\n" \
+ "\tptr: %p - %p = %d != %d\n" \
+ "]\n", \
+ __STRING(func), __location__, __STRING(func), \
+ str, diff, base, res, _v, _ep, _p, (int)(diff - (_ep - _p)), diff); \
+ return false; \
+ } \
+} while (0)
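+
+/* Each TEST_STRTOLL/TEST_STRTOULL invocation below therefore encodes:
+ * input string, base, expected value, expected end-pointer offset (how
+ * many characters the conversion consumed) and expected errno. */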
+
+static int test_strtoll(void)
+{
+ printf("test: strtoll\n");
+
+#define TEST_STRTOLL(str,base,res,diff,errnoo) TEST_STRTO_X(long long int, "%lld", strtoll,str,base,res,diff,errnoo)
+
+ TEST_STRTOLL("15", 10, 15LL, 2, 0);
+ TEST_STRTOLL(" 15", 10, 15LL, 4, 0);
+ TEST_STRTOLL("15", 0, 15LL, 2, 0);
+ TEST_STRTOLL(" 15 ", 0, 15LL, 3, 0);
+ TEST_STRTOLL("+15", 10, 15LL, 3, 0);
+ TEST_STRTOLL(" +15", 10, 15LL, 5, 0);
+ TEST_STRTOLL("+15", 0, 15LL, 3, 0);
+ TEST_STRTOLL(" +15 ", 0, 15LL, 4, 0);
+ TEST_STRTOLL("-15", 10, -15LL, 3, 0);
+ TEST_STRTOLL(" -15", 10, -15LL, 5, 0);
+ TEST_STRTOLL("-15", 0, -15LL, 3, 0);
+ TEST_STRTOLL(" -15 ", 0, -15LL, 4, 0);
+ TEST_STRTOLL("015", 10, 15LL, 3, 0);
+ TEST_STRTOLL(" 015", 10, 15LL, 5, 0);
+ TEST_STRTOLL("015", 0, 13LL, 3, 0);
+ TEST_STRTOLL(" 015", 0, 13LL, 5, 0);
+ TEST_STRTOLL("0x15", 10, 0LL, 1, 0);
+ TEST_STRTOLL(" 0x15", 10, 0LL, 3, 0);
+ TEST_STRTOLL("0x15", 0, 21LL, 4, 0);
+ TEST_STRTOLL(" 0x15", 0, 21LL, 6, 0);
+
+ TEST_STRTOLL("10", 16, 16LL, 2, 0);
+ TEST_STRTOLL(" 10 ", 16, 16LL, 4, 0);
+ TEST_STRTOLL("0x10", 16, 16LL, 4, 0);
+ TEST_STRTOLL("0x10", 0, 16LL, 4, 0);
+ TEST_STRTOLL(" 0x10 ", 0, 16LL, 5, 0);
+ TEST_STRTOLL("+10", 16, 16LL, 3, 0);
+ TEST_STRTOLL(" +10 ", 16, 16LL, 5, 0);
+ TEST_STRTOLL("+0x10", 16, 16LL, 5, 0);
+ TEST_STRTOLL("+0x10", 0, 16LL, 5, 0);
+ TEST_STRTOLL(" +0x10 ", 0, 16LL, 6, 0);
+ TEST_STRTOLL("-10", 16, -16LL, 3, 0);
+ TEST_STRTOLL(" -10 ", 16, -16LL, 5, 0);
+ TEST_STRTOLL("-0x10", 16, -16LL, 5, 0);
+ TEST_STRTOLL("-0x10", 0, -16LL, 5, 0);
+ TEST_STRTOLL(" -0x10 ", 0, -16LL, 6, 0);
+ TEST_STRTOLL("010", 16, 16LL, 3, 0);
+ TEST_STRTOLL(" 010 ", 16, 16LL, 5, 0);
+ TEST_STRTOLL("-010", 16, -16LL, 4, 0);
+
+ TEST_STRTOLL("11", 8, 9LL, 2, 0);
+ TEST_STRTOLL("011", 8, 9LL, 3, 0);
+ TEST_STRTOLL("011", 0, 9LL, 3, 0);
+ TEST_STRTOLL("-11", 8, -9LL, 3, 0);
+ TEST_STRTOLL("-011", 8, -9LL, 4, 0);
+ TEST_STRTOLL("-011", 0, -9LL, 4, 0);
+
+ TEST_STRTOLL("011", 8, 9LL, 3, 0);
+ TEST_STRTOLL("011", 0, 9LL, 3, 0);
+ TEST_STRTOLL("-11", 8, -9LL, 3, 0);
+ TEST_STRTOLL("-011", 8, -9LL, 4, 0);
+ TEST_STRTOLL("-011", 0, -9LL, 4, 0);
+
+ TEST_STRTOLL("Text", 0, 0LL, 0, 0);
+
+ TEST_STRTOLL("9223372036854775807", 10, 9223372036854775807LL, 19, 0);
+ TEST_STRTOLL("9223372036854775807", 0, 9223372036854775807LL, 19, 0);
+ TEST_STRTOLL("9223372036854775808", 0, 9223372036854775807LL, 19, ERANGE);
+ TEST_STRTOLL("9223372036854775808", 10, 9223372036854775807LL, 19, ERANGE);
+ TEST_STRTOLL("0x7FFFFFFFFFFFFFFF", 0, 9223372036854775807LL, 18, 0);
+ TEST_STRTOLL("0x7FFFFFFFFFFFFFFF", 16, 9223372036854775807LL, 18, 0);
+ TEST_STRTOLL("7FFFFFFFFFFFFFFF", 16, 9223372036854775807LL, 16, 0);
+ TEST_STRTOLL("0x8000000000000000", 0, 9223372036854775807LL, 18, ERANGE);
+ TEST_STRTOLL("0x8000000000000000", 16, 9223372036854775807LL, 18, ERANGE);
+ TEST_STRTOLL("80000000000000000", 16, 9223372036854775807LL, 17, ERANGE);
+ TEST_STRTOLL("0777777777777777777777", 0, 9223372036854775807LL, 22, 0);
+ TEST_STRTOLL("0777777777777777777777", 8, 9223372036854775807LL, 22, 0);
+ TEST_STRTOLL("777777777777777777777", 8, 9223372036854775807LL, 21, 0);
+ TEST_STRTOLL("01000000000000000000000", 0, 9223372036854775807LL, 23, ERANGE);
+ TEST_STRTOLL("01000000000000000000000", 8, 9223372036854775807LL, 23, ERANGE);
+ TEST_STRTOLL("1000000000000000000000", 8, 9223372036854775807LL, 22, ERANGE);
+
+ TEST_STRTOLL("-9223372036854775808", 10, -9223372036854775807LL -1, 20, 0);
+ TEST_STRTOLL("-9223372036854775808", 0, -9223372036854775807LL -1, 20, 0);
+ TEST_STRTOLL("-9223372036854775809", 0, -9223372036854775807LL -1, 20, ERANGE);
+ TEST_STRTOLL("-9223372036854775809", 10, -9223372036854775807LL -1, 20, ERANGE);
+ TEST_STRTOLL("-0x8000000000000000", 0, -9223372036854775807LL -1, 19, 0);
+ TEST_STRTOLL("-0x8000000000000000", 16, -9223372036854775807LL -1, 19, 0);
+ TEST_STRTOLL("-8000000000000000", 16, -9223372036854775807LL -1, 17, 0);
+ TEST_STRTOLL("-0x8000000000000001", 0, -9223372036854775807LL -1, 19, ERANGE);
+ TEST_STRTOLL("-0x8000000000000001", 16, -9223372036854775807LL -1, 19, ERANGE);
+ TEST_STRTOLL("-80000000000000001", 16, -9223372036854775807LL -1, 18, ERANGE);
+ TEST_STRTOLL("-01000000000000000000000",0, -9223372036854775807LL -1, 24, 0);
+ TEST_STRTOLL("-01000000000000000000000",8, -9223372036854775807LL -1, 24, 0);
+ TEST_STRTOLL("-1000000000000000000000", 8, -9223372036854775807LL -1, 23, 0);
+ TEST_STRTOLL("-01000000000000000000001",0, -9223372036854775807LL -1, 24, ERANGE);
+ TEST_STRTOLL("-01000000000000000000001",8, -9223372036854775807LL -1, 24, ERANGE);
+ TEST_STRTOLL("-1000000000000000000001", 8, -9223372036854775807LL -1, 23, ERANGE);
+
+ printf("success: strtoll\n");
+ return true;
+}
+
+static int test_strtoull(void)
+{
+ printf("test: strtoull\n");
+
+#define TEST_STRTOULL(str,base,res,diff,errnoo) TEST_STRTO_X(long long unsigned int,"%llu",strtoull,str,base,res,diff,errnoo)
+
+ TEST_STRTOULL("15", 10, 15LLU, 2, 0);
+ TEST_STRTOULL(" 15", 10, 15LLU, 4, 0);
+ TEST_STRTOULL("15", 0, 15LLU, 2, 0);
+ TEST_STRTOULL(" 15 ", 0, 15LLU, 3, 0);
+ TEST_STRTOULL("+15", 10, 15LLU, 3, 0);
+ TEST_STRTOULL(" +15", 10, 15LLU, 5, 0);
+ TEST_STRTOULL("+15", 0, 15LLU, 3, 0);
+ TEST_STRTOULL(" +15 ", 0, 15LLU, 4, 0);
+ TEST_STRTOULL("-15", 10, 18446744073709551601LLU, 3, 0);
+ TEST_STRTOULL(" -15", 10, 18446744073709551601LLU, 5, 0);
+ TEST_STRTOULL("-15", 0, 18446744073709551601LLU, 3, 0);
+ TEST_STRTOULL(" -15 ", 0, 18446744073709551601LLU, 4, 0);
+ TEST_STRTOULL("015", 10, 15LLU, 3, 0);
+ TEST_STRTOULL(" 015", 10, 15LLU, 5, 0);
+ TEST_STRTOULL("015", 0, 13LLU, 3, 0);
+ TEST_STRTOULL(" 015", 0, 13LLU, 5, 0);
+ TEST_STRTOULL("0x15", 10, 0LLU, 1, 0);
+ TEST_STRTOULL(" 0x15", 10, 0LLU, 3, 0);
+ TEST_STRTOULL("0x15", 0, 21LLU, 4, 0);
+ TEST_STRTOULL(" 0x15", 0, 21LLU, 6, 0);
+
+ TEST_STRTOULL("10", 16, 16LLU, 2, 0);
+ TEST_STRTOULL(" 10 ", 16, 16LLU, 4, 0);
+ TEST_STRTOULL("0x10", 16, 16LLU, 4, 0);
+ TEST_STRTOULL("0x10", 0, 16LLU, 4, 0);
+ TEST_STRTOULL(" 0x10 ", 0, 16LLU, 5, 0);
+ TEST_STRTOULL("+10", 16, 16LLU, 3, 0);
+ TEST_STRTOULL(" +10 ", 16, 16LLU, 5, 0);
+ TEST_STRTOULL("+0x10", 16, 16LLU, 5, 0);
+ TEST_STRTOULL("+0x10", 0, 16LLU, 5, 0);
+ TEST_STRTOULL(" +0x10 ", 0, 16LLU, 6, 0);
+ TEST_STRTOULL("-10", 16, -16LLU, 3, 0);
+ TEST_STRTOULL(" -10 ", 16, -16LLU, 5, 0);
+ TEST_STRTOULL("-0x10", 16, -16LLU, 5, 0);
+ TEST_STRTOULL("-0x10", 0, -16LLU, 5, 0);
+ TEST_STRTOULL(" -0x10 ", 0, -16LLU, 6, 0);
+ TEST_STRTOULL("010", 16, 16LLU, 3, 0);
+ TEST_STRTOULL(" 010 ", 16, 16LLU, 5, 0);
+ TEST_STRTOULL("-010", 16, -16LLU, 4, 0);
+
+ TEST_STRTOULL("11", 8, 9LLU, 2, 0);
+ TEST_STRTOULL("011", 8, 9LLU, 3, 0);
+ TEST_STRTOULL("011", 0, 9LLU, 3, 0);
+ TEST_STRTOULL("-11", 8, -9LLU, 3, 0);
+ TEST_STRTOULL("-011", 8, -9LLU, 4, 0);
+ TEST_STRTOULL("-011", 0, -9LLU, 4, 0);
+
+ TEST_STRTOULL("011", 8, 9LLU, 3, 0);
+ TEST_STRTOULL("011", 0, 9LLU, 3, 0);
+ TEST_STRTOULL("-11", 8, -9LLU, 3, 0);
+ TEST_STRTOULL("-011", 8, -9LLU, 4, 0);
+ TEST_STRTOULL("-011", 0, -9LLU, 4, 0);
+
+ TEST_STRTOULL("Text", 0, 0LLU, 0, 0);
+
+ TEST_STRTOULL("9223372036854775807", 10, 9223372036854775807LLU, 19, 0);
+ TEST_STRTOULL("9223372036854775807", 0, 9223372036854775807LLU, 19, 0);
+ TEST_STRTOULL("9223372036854775808", 0, 9223372036854775808LLU, 19, 0);
+ TEST_STRTOULL("9223372036854775808", 10, 9223372036854775808LLU, 19, 0);
+ TEST_STRTOULL("0x7FFFFFFFFFFFFFFF", 0, 9223372036854775807LLU, 18, 0);
+ TEST_STRTOULL("0x7FFFFFFFFFFFFFFF", 16, 9223372036854775807LLU, 18, 0);
+ TEST_STRTOULL("7FFFFFFFFFFFFFFF", 16, 9223372036854775807LLU, 16, 0);
+ TEST_STRTOULL("0x8000000000000000", 0, 9223372036854775808LLU, 18, 0);
+ TEST_STRTOULL("0x8000000000000000", 16, 9223372036854775808LLU, 18, 0);
+ TEST_STRTOULL("8000000000000000", 16, 9223372036854775808LLU, 16, 0);
+ TEST_STRTOULL("0777777777777777777777", 0, 9223372036854775807LLU, 22, 0);
+ TEST_STRTOULL("0777777777777777777777", 8, 9223372036854775807LLU, 22, 0);
+ TEST_STRTOULL("777777777777777777777", 8, 9223372036854775807LLU, 21, 0);
+ TEST_STRTOULL("01000000000000000000000",0, 9223372036854775808LLU, 23, 0);
+ TEST_STRTOULL("01000000000000000000000",8, 9223372036854775808LLU, 23, 0);
+ TEST_STRTOULL("1000000000000000000000", 8, 9223372036854775808LLU, 22, 0);
+
+ TEST_STRTOULL("-9223372036854775808", 10, 9223372036854775808LLU, 20, 0);
+ TEST_STRTOULL("-9223372036854775808", 0, 9223372036854775808LLU, 20, 0);
+ TEST_STRTOULL("-9223372036854775809", 0, 9223372036854775807LLU, 20, 0);
+ TEST_STRTOULL("-9223372036854775809", 10, 9223372036854775807LLU, 20, 0);
+ TEST_STRTOULL("-0x8000000000000000", 0, 9223372036854775808LLU, 19, 0);
+ TEST_STRTOULL("-0x8000000000000000", 16, 9223372036854775808LLU, 19, 0);
+ TEST_STRTOULL("-8000000000000000", 16, 9223372036854775808LLU, 17, 0);
+ TEST_STRTOULL("-0x8000000000000001", 0, 9223372036854775807LLU, 19, 0);
+ TEST_STRTOULL("-0x8000000000000001", 16, 9223372036854775807LLU, 19, 0);
+ TEST_STRTOULL("-8000000000000001", 16, 9223372036854775807LLU, 17, 0);
+ TEST_STRTOULL("-01000000000000000000000",0, 9223372036854775808LLU, 24, 0);
+ TEST_STRTOULL("-01000000000000000000000",8, 9223372036854775808LLU, 24, 0);
+ TEST_STRTOULL("-1000000000000000000000",8, 9223372036854775808LLU, 23, 0);
+ TEST_STRTOULL("-01000000000000000000001",0, 9223372036854775807LLU, 24, 0);
+ TEST_STRTOULL("-01000000000000000000001",8, 9223372036854775807LLU, 24, 0);
+ TEST_STRTOULL("-1000000000000000000001",8, 9223372036854775807LLU, 23, 0);
+
+ TEST_STRTOULL("18446744073709551615", 0, 18446744073709551615LLU, 20, 0);
+ TEST_STRTOULL("18446744073709551615", 10, 18446744073709551615LLU, 20, 0);
+ TEST_STRTOULL("18446744073709551616", 0, 18446744073709551615LLU, 20, ERANGE);
+ TEST_STRTOULL("18446744073709551616", 10, 18446744073709551615LLU, 20, ERANGE);
+ TEST_STRTOULL("0xFFFFFFFFFFFFFFFF", 0, 18446744073709551615LLU, 18, 0);
+ TEST_STRTOULL("0xFFFFFFFFFFFFFFFF", 16, 18446744073709551615LLU, 18, 0);
+ TEST_STRTOULL("FFFFFFFFFFFFFFFF", 16, 18446744073709551615LLU, 16, 0);
+ TEST_STRTOULL("0x10000000000000000", 0, 18446744073709551615LLU, 19, ERANGE);
+ TEST_STRTOULL("0x10000000000000000", 16, 18446744073709551615LLU, 19, ERANGE);
+ TEST_STRTOULL("10000000000000000", 16, 18446744073709551615LLU, 17, ERANGE);
+ TEST_STRTOULL("01777777777777777777777",0, 18446744073709551615LLU, 23, 0);
+ TEST_STRTOULL("01777777777777777777777",8, 18446744073709551615LLU, 23, 0);
+ TEST_STRTOULL("1777777777777777777777", 8, 18446744073709551615LLU, 22, 0);
+ TEST_STRTOULL("02000000000000000000000",0, 18446744073709551615LLU, 23, ERANGE);
+ TEST_STRTOULL("02000000000000000000000",8, 18446744073709551615LLU, 23, ERANGE);
+ TEST_STRTOULL("2000000000000000000000", 8, 18446744073709551615LLU, 22, ERANGE);
+
+ TEST_STRTOULL("-18446744073709551615", 0, 1LLU, 21, 0);
+ TEST_STRTOULL("-18446744073709551615", 10, 1LLU, 21, 0);
+ TEST_STRTOULL("-18446744073709551616", 0, 18446744073709551615LLU, 21, ERANGE);
+ TEST_STRTOULL("-18446744073709551616", 10, 18446744073709551615LLU, 21, ERANGE);
+ TEST_STRTOULL("-0xFFFFFFFFFFFFFFFF", 0, 1LLU, 19, 0);
+ TEST_STRTOULL("-0xFFFFFFFFFFFFFFFF", 16, 1LLU, 19, 0);
+ TEST_STRTOULL("-FFFFFFFFFFFFFFFF", 16, 1LLU, 17, 0);
+ TEST_STRTOULL("-0x10000000000000000", 0, 18446744073709551615LLU, 20, ERANGE);
+ TEST_STRTOULL("-0x10000000000000000", 16, 18446744073709551615LLU, 20, ERANGE);
+ TEST_STRTOULL("-10000000000000000", 16, 18446744073709551615LLU, 18, ERANGE);
+ TEST_STRTOULL("-01777777777777777777777",0, 1LLU, 24, 0);
+ TEST_STRTOULL("-01777777777777777777777",8, 1LLU, 24, 0);
+ TEST_STRTOULL("-1777777777777777777777",8, 1LLU, 23, 0);
+ TEST_STRTOULL("-02000000000000000000000",0, 18446744073709551615LLU, 24, ERANGE);
+ TEST_STRTOULL("-02000000000000000000000",8, 18446744073709551615LLU, 24, ERANGE);
+ TEST_STRTOULL("-2000000000000000000000",8, 18446744073709551615LLU, 23, ERANGE);
+
+ printf("success: strtoull\n");
+ return true;
+}
+
+/*
+FIXME:
+Types:
+bool
+socklen_t
+uint{8,16,32,64}_t
+int{8,16,32,64}_t
+intptr_t
+
+Constants:
+PATH_NAME_MAX
+UINT{16,32,64}_MAX
+INT32_MAX
+*/
+
+static int test_va_copy(void)
+{
+ /* FIXME */
+ return true;
+}
+
+static int test_FUNCTION(void)
+{
+ printf("test: FUNCTION\n");
+ if (strcmp(__FUNCTION__, "test_FUNCTION") != 0) {
+ printf("failure: FUNCTION [\nFUNCTION invalid\n]\n");
+ return false;
+ }
+ printf("success: FUNCTION\n");
+ return true;
+}
+
+static int test_MIN(void)
+{
+ printf("test: MIN\n");
+ if (MIN(20, 1) != 1) {
+ printf("failure: MIN [\nMIN invalid\n]\n");
+ return false;
+ }
+ if (MIN(1, 20) != 1) {
+ printf("failure: MIN [\nMIN invalid\n]\n");
+ return false;
+ }
+ printf("success: MIN\n");
+ return true;
+}
+
+static int test_MAX(void)
+{
+ printf("test: MAX\n");
+ if (MAX(20, 1) != 20) {
+ printf("failure: MAX [\nMAX invalid\n]\n");
+ return false;
+ }
+ if (MAX(1, 20) != 20) {
+ printf("failure: MAX [\nMAX invalid\n]\n");
+ return false;
+ }
+ printf("success: MAX\n");
+ return true;
+}
+
+static int test_socketpair(void)
+{
+ int sock[2];
+ char buf[20];
+
+ printf("test: socketpair\n");
+
+ if (socketpair(AF_UNIX, SOCK_STREAM, 0, sock) == -1) {
+ printf("failure: socketpair [\n"
+ "socketpair() failed\n"
+ "]\n");
+ return false;
+ }
+
+ if (write(sock[1], "automatisch", 12) == -1) {
+ printf("failure: socketpair [\n"
+ "write() failed: %s\n"
+ "]\n", strerror(errno));
+ return false;
+ }
+
+ if (read(sock[0], buf, 12) == -1) {
+ printf("failure: socketpair [\n"
+ "read() failed: %s\n"
+ "]\n", strerror(errno));
+ return false;
+ }
+
+ if (strcmp(buf, "automatisch") != 0) {
+ printf("failure: socketpair [\n"
+ "expected: automatisch, got: %s\n"
+ "]\n", buf);
+ return false;
+ }
+
+ printf("success: socketpair\n");
+
+ return true;
+}
+
+extern int libreplace_test_strptime(void);
+
+static int test_strptime(void)
+{
+ return libreplace_test_strptime();
+}
+
+extern int getifaddrs_test(void);
+
+static int test_getifaddrs(void)
+{
+
+ printf("test: getifaddrs\n");
+
+ if (getifaddrs_test() != 0) {
+ printf("failure: getifaddrs\n");
+ return false;
+ }
+
+ printf("success: getifaddrs\n");
+ return true;
+}
+
+static int test_utime(void)
+{
+ struct utimbuf u;
+ struct stat st1, st2, st3;
+ int fd;
+
+ printf("test: utime\n");
+ unlink(TESTFILE);
+
+ fd = open(TESTFILE, O_RDWR|O_CREAT, 0600);
+ if (fd == -1) {
+ printf("failure: utime [\n"
+ "creating '%s' failed - %s\n]\n",
+ TESTFILE, strerror(errno));
+ return false;
+ }
+
+ if (fstat(fd, &st1) != 0) {
+ printf("failure: utime [\n"
+ "fstat (1) failed - %s\n]\n",
+ strerror(errno));
+ close(fd);
+ return false;
+ }
+
+ u.actime = st1.st_atime + 300;
+ u.modtime = st1.st_mtime - 300;
+ if (utime(TESTFILE, &u) != 0) {
+ printf("failure: utime [\n"
+ "utime(&u) failed - %s\n]\n",
+ strerror(errno));
+ close(fd);
+ return false;
+ }
+
+ if (fstat(fd, &st2) != 0) {
+ printf("failure: utime [\n"
+ "fstat (2) failed - %s\n]\n",
+ strerror(errno));
+ close(fd);
+ return false;
+ }
+
+ if (utime(TESTFILE, NULL) != 0) {
+ printf("failure: utime [\n"
+ "utime(NULL) failed - %s\n]\n",
+ strerror(errno));
+ close(fd);
+ return false;
+ }
+
+ if (fstat(fd, &st3) != 0) {
+ printf("failure: utime [\n"
+ "fstat (3) failed - %s\n]\n",
+ strerror(errno));
+ close(fd);
+ return false;
+ }
+
+#define CMP_VAL(a,c,b) do { \
+ if (a c b) { \
+ printf("failure: utime [\n" \
+ "%s: %s(%d) %s %s(%d)\n]\n", \
+ __location__, \
+ #a, (int)a, #c, #b, (int)b); \
+ close(fd); \
+ return false; \
+ } \
+} while(0)
+#define EQUAL_VAL(a,b) CMP_VAL(a,!=,b)
+#define GREATER_VAL(a,b) CMP_VAL(a,<=,b)
+#define LESSER_VAL(a,b) CMP_VAL(a,>=,b)
+
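+	/* utime(NULL) resets both stamps to "now", which must fall
+	 * strictly between the artificial values set above: below
+	 * atime+300 and above mtime-300. */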
+ EQUAL_VAL(st2.st_atime, st1.st_atime + 300);
+ EQUAL_VAL(st2.st_mtime, st1.st_mtime - 300);
+ LESSER_VAL(st3.st_atime, st2.st_atime);
+ GREATER_VAL(st3.st_mtime, st2.st_mtime);
+
+#undef CMP_VAL
+#undef EQUAL_VAL
+#undef GREATER_VAL
+#undef LESSER_VAL
+
+ unlink(TESTFILE);
+ printf("success: utime\n");
+ close(fd);
+ return true;
+}
+
+static int test_utimes(void)
+{
+ struct timeval tv[2];
+ struct stat st1, st2;
+ int fd;
+
+ printf("test: utimes\n");
+ unlink(TESTFILE);
+
+ fd = open(TESTFILE, O_RDWR|O_CREAT, 0600);
+ if (fd == -1) {
+ printf("failure: utimes [\n"
+ "creating '%s' failed - %s\n]\n",
+ TESTFILE, strerror(errno));
+ return false;
+ }
+
+ if (fstat(fd, &st1) != 0) {
+ printf("failure: utimes [\n"
+ "fstat (1) failed - %s\n]\n",
+ strerror(errno));
+ close(fd);
+ return false;
+ }
+
+ ZERO_STRUCT(tv);
+ tv[0].tv_sec = st1.st_atime + 300;
+ tv[1].tv_sec = st1.st_mtime - 300;
+ if (utimes(TESTFILE, tv) != 0) {
+ printf("failure: utimes [\n"
+ "utimes(tv) failed - %s\n]\n",
+ strerror(errno));
+ close(fd);
+ return false;
+ }
+
+ if (fstat(fd, &st2) != 0) {
+ printf("failure: utimes [\n"
+ "fstat (2) failed - %s\n]\n",
+ strerror(errno));
+ close(fd);
+ return false;
+ }
+
+#define EQUAL_VAL(a,b) do { \
+ if (a != b) { \
+ printf("failure: utimes [\n" \
+ "%s: %s(%d) != %s(%d)\n]\n", \
+ __location__, \
+ #a, (int)a, #b, (int)b); \
+ close(fd); \
+ return false; \
+ } \
+} while(0)
+
+ EQUAL_VAL(st2.st_atime, st1.st_atime + 300);
+ EQUAL_VAL(st2.st_mtime, st1.st_mtime - 300);
+
+#undef EQUAL_VAL
+
+ unlink(TESTFILE);
+ printf("success: utimes\n");
+ close(fd);
+ return true;
+}
+
+static int test_memmem(void)
+{
+ char *s;
+
+ printf("test: memmem\n");
+
+ s = (char *)memmem("foo", 3, "fo", 2);
+ if (strcmp(s, "foo") != 0) {
+ printf(__location__ ": Failed memmem\n");
+ return false;
+ }
+
+ s = (char *)memmem("foo", 3, "", 0);
+ /* it is allowable for this to return NULL (as happens on
+ FreeBSD) */
+ if (s && strcmp(s, "foo") != 0) {
+ printf(__location__ ": Failed memmem\n");
+ return false;
+ }
+
+ s = (char *)memmem("foo", 4, "o", 1);
+ if (strcmp(s, "oo") != 0) {
+ printf(__location__ ": Failed memmem\n");
+ return false;
+ }
+
+ s = (char *)memmem("foobarfodx", 11, "fod", 3);
+ if (strcmp(s, "fodx") != 0) {
+ printf(__location__ ": Failed memmem\n");
+ return false;
+ }
+
+ printf("success: memmem\n");
+
+ return true;
+}
+
+
+bool torture_local_replace(struct torture_context *ctx)
+{
+ bool ret = true;
+ ret &= test_ftruncate();
+ ret &= test_strlcpy();
+ ret &= test_strlcat();
+ ret &= test_mktime();
+ ret &= test_initgroups();
+ ret &= test_memmove();
+ ret &= test_strdup();
+ ret &= test_setlinebuf();
+ ret &= test_vsyslog();
+ ret &= test_timegm();
+ ret &= test_setenv();
+ ret &= test_strndup();
+ ret &= test_strnlen();
+ ret &= test_waitpid();
+ ret &= test_seteuid();
+ ret &= test_setegid();
+ ret &= test_asprintf();
+ ret &= test_snprintf();
+ ret &= test_vasprintf();
+ ret &= test_vsnprintf();
+ ret &= test_opendir();
+ ret &= test_readdir();
+ ret &= test_telldir();
+ ret &= test_seekdir();
+ ret &= test_dlopen();
+ ret &= test_chroot();
+ ret &= test_bzero();
+ ret &= test_strerror();
+ ret &= test_errno();
+ ret &= test_mkdtemp();
+ ret &= test_mkstemp();
+ ret &= test_pread();
+ ret &= test_pwrite();
+ ret &= test_inet_ntoa();
+ ret &= test_strtoll();
+ ret &= test_strtoull();
+ ret &= test_va_copy();
+ ret &= test_FUNCTION();
+ ret &= test_MIN();
+ ret &= test_MAX();
+ ret &= test_socketpair();
+ ret &= test_strptime();
+ ret &= test_getifaddrs();
+ ret &= test_utime();
+ ret &= test_utimes();
+ ret &= test_memmem();
+
+ return ret;
+}
diff --git a/lib/replace/timegm.c b/lib/replace/timegm.c
new file mode 100644
index 0000000..395c684
--- /dev/null
+++ b/lib/replace/timegm.c
@@ -0,0 +1,78 @@
+/*
+ * Copyright (c) 1997 Kungliga Tekniska Högskolan
+ * (Royal Institute of Technology, Stockholm, Sweden).
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ *
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * 3. Neither the name of the Institute nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE INSTITUTE AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE INSTITUTE OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+/*
+ adapted for Samba4 by Andrew Tridgell
+*/
+
+#include "replace.h"
+#include "system/time.h"
+
+static int is_leap(unsigned y)
+{
+ y += 1900;
+ return (y % 4) == 0 && ((y % 100) != 0 || (y % 400) == 0);
+}
+
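+/* Plain epoch arithmetic: days for the whole years since 1970, plus
+ * days for the whole months of the target year, plus the day of the
+ * month, then scaled out to hours, minutes and seconds.  No timezone or
+ * DST handling is needed because the input is UTC by definition. */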
+time_t rep_timegm(struct tm *tm)
+{
+ static const unsigned ndays[2][12] ={
+ {31, 28, 31, 30, 31, 30, 31, 31, 30, 31, 30, 31},
+ {31, 29, 31, 30, 31, 30, 31, 31, 30, 31, 30, 31}};
+ time_t res = 0;
+ unsigned i;
+
+ if (tm->tm_mon > 12 ||
+ tm->tm_mon < 0 ||
+ tm->tm_mday > 31 ||
+ tm->tm_min > 60 ||
+ tm->tm_sec > 60 ||
+ tm->tm_hour > 24) {
+ /* invalid tm structure */
+ return 0;
+ }
+
+ for (i = 70; i < tm->tm_year; ++i)
+ res += is_leap(i) ? 366 : 365;
+
+ for (i = 0; i < tm->tm_mon; ++i)
+ res += ndays[is_leap(tm->tm_year)][i];
+ res += tm->tm_mday - 1;
+ res *= 24;
+ res += tm->tm_hour;
+ res *= 60;
+ res += tm->tm_min;
+ res *= 60;
+ res += tm->tm_sec;
+ return res;
+}
diff --git a/lib/replace/win32_replace.h b/lib/replace/win32_replace.h
new file mode 100644
index 0000000..9901e72
--- /dev/null
+++ b/lib/replace/win32_replace.h
@@ -0,0 +1,159 @@
+#ifndef _WIN32_REPLACE_H
+#define _WIN32_REPLACE_H
+
+#ifdef HAVE_WINSOCK2_H
+#include <winsock2.h>
+#endif
+
+#ifdef HAVE_WS2TCPIP_H
+#include <ws2tcpip.h>
+#endif
+
+#ifdef HAVE_WINDOWS_H
+#include <windows.h>
+#endif
+
+/* Map BSD Socket errorcodes to the WSA errorcodes (if possible) */
+
+#define EAFNOSUPPORT WSAEAFNOSUPPORT
+#define ECONNREFUSED WSAECONNREFUSED
+#define EINPROGRESS WSAEINPROGRESS
+#define EMSGSIZE WSAEMSGSIZE
+#define ENOBUFS WSAENOBUFS
+#define ENOTSOCK WSAENOTSOCK
+#define ENETUNREACH WSAENETUNREACH
+#define ENOPROTOOPT WSAENOPROTOOPT
+#define ENOTCONN WSAENOTCONN
+#define ENOTSUP 134
+
+/* We undefine the following constants due to conflicts with the w32api headers
+ * and the Windows Platform SDK/DDK.
+ */
+
+#undef interface
+
+#undef ERROR_INVALID_PARAMETER
+#undef ERROR_INSUFFICIENT_BUFFER
+#undef ERROR_INVALID_DATATYPE
+
+#undef FILE_GENERIC_READ
+#undef FILE_GENERIC_WRITE
+#undef FILE_GENERIC_EXECUTE
+#undef FILE_ATTRIBUTE_READONLY
+#undef FILE_ATTRIBUTE_HIDDEN
+#undef FILE_ATTRIBUTE_SYSTEM
+#undef FILE_ATTRIBUTE_DIRECTORY
+#undef FILE_ATTRIBUTE_ARCHIVE
+#undef FILE_ATTRIBUTE_DEVICE
+#undef FILE_ATTRIBUTE_NORMAL
+#undef FILE_ATTRIBUTE_TEMPORARY
+#undef FILE_ATTRIBUTE_REPARSE_POINT
+#undef FILE_ATTRIBUTE_COMPRESSED
+#undef FILE_ATTRIBUTE_OFFLINE
+#undef FILE_ATTRIBUTE_ENCRYPTED
+#undef FILE_FLAG_WRITE_THROUGH
+#undef FILE_FLAG_NO_BUFFERING
+#undef FILE_FLAG_RANDOM_ACCESS
+#undef FILE_FLAG_SEQUENTIAL_SCAN
+#undef FILE_FLAG_DELETE_ON_CLOSE
+#undef FILE_FLAG_BACKUP_SEMANTICS
+#undef FILE_FLAG_POSIX_SEMANTICS
+#undef FILE_TYPE_DISK
+#undef FILE_TYPE_UNKNOWN
+#undef FILE_CASE_SENSITIVE_SEARCH
+#undef FILE_CASE_PRESERVED_NAMES
+#undef FILE_UNICODE_ON_DISK
+#undef FILE_PERSISTENT_ACLS
+#undef FILE_FILE_COMPRESSION
+#undef FILE_VOLUME_QUOTAS
+#undef FILE_VOLUME_IS_COMPRESSED
+#undef FILE_NOTIFY_CHANGE_FILE_NAME
+#undef FILE_NOTIFY_CHANGE_DIR_NAME
+#undef FILE_NOTIFY_CHANGE_ATTRIBUTES
+#undef FILE_NOTIFY_CHANGE_SIZE
+#undef FILE_NOTIFY_CHANGE_LAST_WRITE
+#undef FILE_NOTIFY_CHANGE_LAST_ACCESS
+#undef FILE_NOTIFY_CHANGE_CREATION
+#undef FILE_NOTIFY_CHANGE_EA
+#undef FILE_NOTIFY_CHANGE_SECURITY
+#undef FILE_NOTIFY_CHANGE_STREAM_NAME
+#undef FILE_NOTIFY_CHANGE_STREAM_SIZE
+#undef FILE_NOTIFY_CHANGE_STREAM_WRITE
+#undef FILE_NOTIFY_CHANGE_NAME
+
+#undef PRINTER_ATTRIBUTE_QUEUED
+#undef PRINTER_ATTRIBUTE_DIRECT
+#undef PRINTER_ATTRIBUTE_DEFAULT
+#undef PRINTER_ATTRIBUTE_SHARED
+#undef PRINTER_ATTRIBUTE_NETWORK
+#undef PRINTER_ATTRIBUTE_HIDDEN
+#undef PRINTER_ATTRIBUTE_LOCAL
+#undef PRINTER_ATTRIBUTE_ENABLE_DEVQ
+#undef PRINTER_ATTRIBUTE_KEEPPRINTEDJOBS
+#undef PRINTER_ATTRIBUTE_DO_COMPLETE_FIRST
+#undef PRINTER_ATTRIBUTE_WORK_OFFLINE
+#undef PRINTER_ATTRIBUTE_ENABLE_BIDI
+#undef PRINTER_ATTRIBUTE_RAW_ONLY
+#undef PRINTER_ATTRIBUTE_PUBLISHED
+#undef PRINTER_ENUM_DEFAULT
+#undef PRINTER_ENUM_LOCAL
+#undef PRINTER_ENUM_CONNECTIONS
+#undef PRINTER_ENUM_FAVORITE
+#undef PRINTER_ENUM_NAME
+#undef PRINTER_ENUM_REMOTE
+#undef PRINTER_ENUM_SHARED
+#undef PRINTER_ENUM_NETWORK
+#undef PRINTER_ENUM_EXPAND
+#undef PRINTER_ENUM_CONTAINER
+#undef PRINTER_ENUM_ICON1
+#undef PRINTER_ENUM_ICON2
+#undef PRINTER_ENUM_ICON3
+#undef PRINTER_ENUM_ICON4
+#undef PRINTER_ENUM_ICON5
+#undef PRINTER_ENUM_ICON6
+#undef PRINTER_ENUM_ICON7
+#undef PRINTER_ENUM_ICON8
+#undef PRINTER_STATUS_PAUSED
+#undef PRINTER_STATUS_ERROR
+#undef PRINTER_STATUS_PENDING_DELETION
+#undef PRINTER_STATUS_PAPER_JAM
+#undef PRINTER_STATUS_PAPER_OUT
+#undef PRINTER_STATUS_MANUAL_FEED
+#undef PRINTER_STATUS_PAPER_PROBLEM
+#undef PRINTER_STATUS_OFFLINE
+#undef PRINTER_STATUS_IO_ACTIVE
+#undef PRINTER_STATUS_BUSY
+#undef PRINTER_STATUS_PRINTING
+#undef PRINTER_STATUS_OUTPUT_BIN_FULL
+#undef PRINTER_STATUS_NOT_AVAILABLE
+#undef PRINTER_STATUS_WAITING
+#undef PRINTER_STATUS_PROCESSING
+#undef PRINTER_STATUS_INITIALIZING
+#undef PRINTER_STATUS_WARMING_UP
+#undef PRINTER_STATUS_TONER_LOW
+#undef PRINTER_STATUS_NO_TONER
+#undef PRINTER_STATUS_PAGE_PUNT
+#undef PRINTER_STATUS_USER_INTERVENTION
+#undef PRINTER_STATUS_OUT_OF_MEMORY
+#undef PRINTER_STATUS_DOOR_OPEN
+#undef PRINTER_STATUS_SERVER_UNKNOWN
+#undef PRINTER_STATUS_POWER_SAVE
+
+#undef DWORD
+#undef HKEY_CLASSES_ROOT
+#undef HKEY_CURRENT_USER
+#undef HKEY_LOCAL_MACHINE
+#undef HKEY_USERS
+#undef HKEY_PERFORMANCE_DATA
+#undef HKEY_CURRENT_CONFIG
+#undef HKEY_DYN_DATA
+#undef REG_DWORD
+#undef REG_QWORD
+
+#undef SERVICE_STATE_ALL
+
+#undef SE_GROUP_MANDATORY
+#undef SE_GROUP_ENABLED_BY_DEFAULT
+#undef SE_GROUP_ENABLED
+
+#endif /* _WIN32_REPLACE_H */
diff --git a/lib/replace/wscript b/lib/replace/wscript
new file mode 100644
index 0000000..1b156fa
--- /dev/null
+++ b/lib/replace/wscript
@@ -0,0 +1,730 @@
+#!/usr/bin/env python
+
+APPNAME = 'libreplace'
+VERSION = '1.2.1'
+
+blddir = 'bin'
+
+import sys, os
+
+# find the buildtools directory (look at most four levels up, so this
+# wscript works both standalone and from inside a larger source tree)
+srcdir = '.'
+while not os.path.exists(srcdir+'/buildtools') and len(srcdir.split('/')) < 5:
+ srcdir = srcdir + '/..'
+sys.path.insert(0, srcdir + '/buildtools/wafsamba')
+
+import wafsamba, samba_dist
+import Options
+
+samba_dist.DIST_DIRS('lib/replace buildtools:buildtools third_party/waf:third_party/waf')
+
+def set_options(opt):
+ opt.BUILTIN_DEFAULT('NONE')
+ opt.PRIVATE_EXTENSION_DEFAULT('')
+ opt.RECURSE('buildtools/wafsamba')
+
+@wafsamba.runonce
+def configure(conf):
+ conf.RECURSE('buildtools/wafsamba')
+
+ conf.env.standalone_replace = conf.IN_LAUNCH_DIR()
+
+ conf.DEFINE('HAVE_LIBREPLACE', 1)
+ conf.DEFINE('LIBREPLACE_NETWORK_CHECKS', 1)
+
+ conf.CHECK_HEADERS('linux/types.h crypt.h locale.h acl/libacl.h compat.h')
+ conf.CHECK_HEADERS('acl/libacl.h attr/xattr.h compat.h ctype.h dustat.h')
+ conf.CHECK_HEADERS('fcntl.h fnmatch.h glob.h history.h krb5.h langinfo.h')
+ conf.CHECK_HEADERS('libaio.h locale.h ndir.h pwd.h')
+ conf.CHECK_HEADERS('shadow.h sys/acl.h')
+ conf.CHECK_HEADERS('sys/attributes.h attr/attributes.h sys/capability.h sys/dir.h sys/epoll.h')
+ conf.CHECK_HEADERS('port.h')
+ conf.CHECK_HEADERS('sys/fcntl.h sys/filio.h sys/filsys.h sys/fs/s5param.h sys/fs/vx/quota.h')
+ conf.CHECK_HEADERS('sys/id.h sys/ioctl.h sys/ipc.h sys/mman.h sys/mode.h sys/ndir.h sys/priv.h')
+ conf.CHECK_HEADERS('sys/resource.h sys/security.h sys/shm.h sys/statfs.h sys/statvfs.h sys/termio.h')
+ conf.CHECK_HEADERS('sys/vfs.h sys/xattr.h termio.h termios.h sys/file.h')
+ conf.CHECK_HEADERS('sys/ucontext.h sys/wait.h sys/stat.h malloc.h grp.h')
+ conf.CHECK_HEADERS('sys/select.h setjmp.h utime.h sys/syslog.h syslog.h')
+ conf.CHECK_HEADERS('stdarg.h vararg.h sys/mount.h mntent.h')
+ conf.CHECK_HEADERS('stropts.h unix.h string.h strings.h sys/param.h limits.h')
+ conf.CHECK_HEADERS('''sys/socket.h netinet/in.h netdb.h arpa/inet.h netinet/in_systm.h
+ netinet/ip.h netinet/tcp.h netinet/in_ip.h
+ sys/sockio.h sys/un.h''', together=True)
+ conf.CHECK_HEADERS('sys/uio.h ifaddrs.h direct.h dirent.h')
+ conf.CHECK_HEADERS('windows.h winsock2.h ws2tcpip.h')
+ conf.CHECK_HEADERS('errno.h')
+ conf.CHECK_HEADERS('getopt.h iconv.h')
+ conf.CHECK_HEADERS('memory.h nss.h sasl/sasl.h')
+
+ conf.CHECK_FUNCS_IN('inotify_init', 'inotify', checklibc=True,
+ headers='sys/inotify.h')
+
+ conf.CHECK_HEADERS('security/pam_appl.h zlib.h asm/unistd.h')
+ conf.CHECK_HEADERS('aio.h sys/unistd.h rpc/rpc.h rpc/nettype.h alloca.h float.h')
+
+ conf.CHECK_HEADERS('rpcsvc/nis.h rpcsvc/ypclnt.h sys/sysctl.h')
+ conf.CHECK_HEADERS('sys/fileio.h sys/filesys.h sys/dustat.h sys/sysmacros.h')
+ conf.CHECK_HEADERS('xfs/libxfs.h netgroup.h')
+
+ conf.CHECK_CODE('', headers='rpc/rpc.h rpcsvc/yp_prot.h', define='HAVE_RPCSVC_YP_PROT_H')
+
+ conf.CHECK_HEADERS('valgrind.h valgrind/valgrind.h valgrind/memcheck.h')
+ conf.CHECK_HEADERS('nss_common.h nsswitch.h ns_api.h')
+ conf.CHECK_HEADERS('sys/extattr.h sys/ea.h sys/proplist.h sys/cdefs.h')
+ conf.CHECK_HEADERS('utmp.h utmpx.h lastlog.h malloc.h')
+ conf.CHECK_HEADERS('syscall.h sys/syscall.h inttypes.h')
+ conf.CHECK_HEADERS('sys/atomic.h')
+ conf.CHECK_HEADERS('libgen.h')
+
+ # Check for process set name support
+ conf.CHECK_CODE('''
+ #include <sys/prctl.h>
+ int main(void) {
+ prctl(0);
+ return 0;
+ }
+ ''',
+ 'HAVE_PRCTL',
+ headers='sys/prctl.h',
+ msg='Checking for prctl syscall')
+
+ conf.CHECK_CODE('''
+ #include <unistd.h>
+ #ifdef HAVE_FCNTL_H
+ #include <fcntl.h>
+ #endif
+ int main(void) { int fd = open("/dev/null", O_DIRECT); }
+ ''',
+ define='HAVE_OPEN_O_DIRECT',
+ addmain=False,
+ msg='Checking for O_DIRECT flag to open(2)')
+
+ conf.CHECK_TYPES('"long long" intptr_t uintptr_t ptrdiff_t comparison_fn_t')
+ conf.CHECK_TYPE('_Bool', define='HAVE__Bool')
+ conf.CHECK_TYPE('bool', define='HAVE_BOOL')
+
+ conf.CHECK_TYPE('int8_t', 'char')
+ conf.CHECK_TYPE('uint8_t', 'unsigned char')
+ conf.CHECK_TYPE('int16_t', 'short')
+ conf.CHECK_TYPE('uint16_t', 'unsigned short')
+ conf.CHECK_TYPE('int32_t', 'int')
+ conf.CHECK_TYPE('uint32_t', 'unsigned')
+ conf.CHECK_TYPE('int64_t', 'long long')
+ conf.CHECK_TYPE('uint64_t', 'unsigned long long')
+ conf.CHECK_TYPE('size_t', 'unsigned int')
+ conf.CHECK_TYPE('ssize_t', 'int')
+ conf.CHECK_TYPE('ino_t', 'unsigned')
+ conf.CHECK_TYPE('loff_t', 'off_t')
+ conf.CHECK_TYPE('offset_t', 'loff_t')
+ conf.CHECK_TYPE('volatile int', define='HAVE_VOLATILE')
+ conf.CHECK_TYPE('uint_t', 'unsigned int')
+ conf.CHECK_TYPE('blksize_t', 'long', headers='sys/types.h sys/stat.h unistd.h')
+ conf.CHECK_TYPE('blkcnt_t', 'long', headers='sys/types.h sys/stat.h unistd.h')
+
+ conf.CHECK_SIZEOF('bool char int "long long" long short size_t ssize_t')
+ conf.CHECK_SIZEOF('int8_t uint8_t int16_t uint16_t int32_t uint32_t int64_t uint64_t')
+ conf.CHECK_SIZEOF('void*', define='SIZEOF_VOID_P')
+ conf.CHECK_SIZEOF('off_t dev_t ino_t time_t')
+
+ conf.CHECK_TYPES('socklen_t', headers='sys/socket.h')
+ conf.CHECK_TYPE_IN('struct ifaddrs', 'ifaddrs.h')
+ conf.CHECK_TYPE_IN('struct addrinfo', 'netdb.h')
+ conf.CHECK_TYPE_IN('struct sockaddr', 'sys/socket.h')
+ conf.CHECK_CODE('struct sockaddr_in6 x', define='HAVE_STRUCT_SOCKADDR_IN6',
+ headers='sys/socket.h netdb.h netinet/in.h')
+ conf.CHECK_TYPE_IN('struct sockaddr_storage', 'sys/socket.h')
+ conf.CHECK_TYPE_IN('sa_family_t', 'sys/socket.h')
+
+ conf.CHECK_TYPE_IN('sig_atomic_t', 'signal.h', define='HAVE_SIG_ATOMIC_T_TYPE')
+
+ conf.CHECK_FUNCS_IN('''inet_ntoa inet_aton inet_ntop inet_pton connect gethostbyname
+ getaddrinfo getnameinfo freeaddrinfo gai_strerror socketpair''',
+ 'socket nsl', checklibc=True,
+ headers='sys/socket.h netinet/in.h arpa/inet.h netdb.h')
+
+ # Some old Linux systems have broken header files and
+ # miss the IPV6_V6ONLY define in netinet/in.h,
+ # but have it in linux/in6.h.
+    # We can't include both files, so we just check whether the value
+    # is defined and do the replacement in system/network.h
+ if not conf.CHECK_VARIABLE('IPV6_V6ONLY',
+ headers='sys/socket.h netdb.h netinet/in.h'):
+ conf.CHECK_CODE('''
+ #include <linux/in6.h>
+ #if (IPV6_V6ONLY != 26)
+ #error no IPV6_V6ONLY support on linux
+ #endif
+ int main(void) { return IPV6_V6ONLY; }
+ ''',
+ define='HAVE_LINUX_IPV6_V6ONLY_26',
+ addmain=False,
+ msg='Checking for IPV6_V6ONLY in linux/in6.h',
+ local_include=False)
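+    # If HAVE_LINUX_IPV6_V6ONLY_26 ends up defined, system/network.h can
+    # supply the missing constant itself (roughly: #ifndef IPV6_V6ONLY /
+    # #define IPV6_V6ONLY 26 / #endif), which the HAVE_IPV6 test below
+    # also relies on.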
+
+ conf.CHECK_CODE('''
+ struct sockaddr_storage sa_store;
+ struct addrinfo *ai = NULL;
+ struct in6_addr in6addr;
+ int idx = if_nametoindex("iface1");
+ int s = socket(AF_INET6, SOCK_STREAM, 0);
+ int ret = getaddrinfo(NULL, NULL, NULL, &ai);
+ if (ret != 0) {
+ const char *es = gai_strerror(ret);
+ }
+ freeaddrinfo(ai);
+ {
+ int val = 1;
+ #ifdef HAVE_LINUX_IPV6_V6ONLY_26
+ #define IPV6_V6ONLY 26
+ #endif
+ ret = setsockopt(s, IPPROTO_IPV6, IPV6_V6ONLY,
+ (const void *)&val, sizeof(val));
+ }
+ ''',
+ define='HAVE_IPV6',
+ lib='nsl socket',
+ headers='sys/socket.h netdb.h netinet/in.h')
+
+ if conf.CONFIG_SET('HAVE_SYS_UCONTEXT_H') and conf.CONFIG_SET('HAVE_SIGNAL_H'):
+ conf.CHECK_CODE('''
+ ucontext_t uc;
+ sigaddset(&uc.uc_sigmask, SIGUSR1);
+ ''',
+ 'HAVE_UCONTEXT_T',
+ msg="Checking whether we have ucontext_t",
+ headers='signal.h sys/ucontext.h')
+
+    # Check for atomic builtins.
+ conf.CHECK_CODE('''
+ int main(void) {
+ int i;
+ (void)__sync_fetch_and_add(&i, 1);
+ return 0;
+ }
+ ''',
+ 'HAVE___SYNC_FETCH_AND_ADD',
+ msg='Checking for __sync_fetch_and_add compiler builtin')
+
+ conf.CHECK_CODE('''
+ #include <stdint.h>
+ #include <sys/atomic.h>
+ int main(void) {
+ int32_t i;
+ atomic_add_32(&i, 1);
+ return 0;
+ }
+ ''',
+ 'HAVE_ATOMIC_ADD_32',
+ headers='stdint.h sys/atomic.h',
+ msg='Checking for atomic_add_32 compiler builtin')
+
+ # these may be builtins, so we need the link=False strategy
+ conf.CHECK_FUNCS('strdup memmem printf memset memcpy memmove strcpy strncpy bzero', link=False)
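+    # (with link=False only a compile-time check is performed; a link test
+    # could be satisfied by a compiler builtin even where libc lacks the
+    # actual symbol)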
+
+ # See https://bugzilla.samba.org/show_bug.cgi?id=1097
+ #
+ # Ported in from autoconf where it was added with this commit:
+ # commit 804cfb20a067b4b687089dc72a8271b3abf20f31
+ # Author: Simo Sorce <idra@samba.org>
+ # Date: Wed Aug 25 14:24:16 2004 +0000
+ # r2070: Let's try to overload srnlen and strndup for AIX where they are natly broken.
+
+ host_os = sys.platform
+ if host_os.rfind('aix') > -1:
+ conf.DEFINE('BROKEN_STRNLEN', 1)
+ conf.DEFINE('BROKEN_STRNDUP', 1)
+
+ conf.CHECK_FUNCS('shl_load shl_unload shl_findsym')
+ conf.CHECK_FUNCS('pipe strftime srandom random srand rand usleep setbuffer')
+ conf.CHECK_FUNCS('lstat getpgrp utime utimes setuid seteuid setreuid setresuid setgid setegid')
+ conf.CHECK_FUNCS('setregid setresgid chroot strerror vsyslog setlinebuf mktime')
+ conf.CHECK_FUNCS('ftruncate chsize rename waitpid wait4')
+ conf.CHECK_FUNCS('initgroups pread pwrite strndup strcasestr')
+ conf.CHECK_FUNCS('strtok_r mkdtemp dup2 dprintf vdprintf isatty chown lchown')
+ conf.CHECK_FUNCS('link readlink symlink realpath snprintf vsnprintf')
+ conf.CHECK_FUNCS('asprintf vasprintf setenv unsetenv strnlen strtoull __strtoull')
+ conf.CHECK_FUNCS('strtouq strtoll __strtoll strtoq memalign posix_memalign')
+ conf.CHECK_FUNCS('prctl dirname basename')
+
+ # libbsd on some platforms provides strlcpy and strlcat
+ if not conf.CHECK_FUNCS('strlcpy strlcat'):
+ conf.CHECK_FUNCS_IN('strlcpy strlcat', 'bsd', headers='bsd/string.h',
+ checklibc=True)
+ if not conf.CHECK_FUNCS('getpeereid'):
+ conf.CHECK_FUNCS_IN('getpeereid', 'bsd', headers='sys/types.h bsd/unistd.h')
+ if not conf.CHECK_FUNCS_IN('setproctitle', 'setproctitle', headers='setproctitle.h'):
+ conf.CHECK_FUNCS_IN('setproctitle', 'bsd', headers='sys/types.h bsd/unistd.h')
+
+ conf.CHECK_CODE('''
+ struct ucred cred;
+ socklen_t cred_len;
+ int ret = getsockopt(0, SOL_SOCKET, SO_PEERCRED, &cred, &cred_len);''',
+ 'HAVE_PEERCRED',
+ msg="Checking whether we can use SO_PEERCRED to get socket credentials",
+ headers='sys/types.h sys/socket.h')
+
+    # Some OSes (e.g. FreeBSD) return EINVAL if the conversion could not be
+    # done; that's not what we expect, so let's detect those cases.
+ if conf.CONFIG_SET('HAVE_STRTOLL'):
+ conf.CHECK_CODE('''
+ long long nb = strtoll("Text", NULL, 0);
+ if (errno == EINVAL) {
+ return 0;
+ } else {
+ return 1;
+ }
+ ''',
+ msg="Checking correct behavior of strtoll",
+ headers = 'errno.h',
+ execute = True,
+ define = 'HAVE_BSD_STRTOLL',
+ )
+ conf.CHECK_FUNCS('if_nametoindex strerror_r')
+ conf.CHECK_FUNCS('getdirentries getdents syslog')
+ conf.CHECK_FUNCS('gai_strerror get_current_dir_name')
+ conf.CHECK_FUNCS('timegm getifaddrs freeifaddrs mmap setgroups syscall setsid')
+ conf.CHECK_FUNCS('getgrent_r getgrgid_r getgrnam_r getgrouplist getpagesize')
+ conf.CHECK_FUNCS('getpwent_r getpwnam_r getpwuid_r epoll_create')
+ conf.CHECK_FUNCS('port_create')
+
+ conf.SET_TARGET_TYPE('attr', 'EMPTY')
+
+ xattr_headers='sys/attributes.h attr/xattr.h sys/xattr.h'
+
+ conf.CHECK_FUNCS_IN('''
+fgetxattr flistea flistxattr
+fremovexattr fsetxattr getxattr
+listxattr removexattr setxattr
+''', 'attr', checklibc=True, headers=xattr_headers)
+
+ # We need to check for linux xattrs first, as we do not wish to link to -lattr
+ # (the XFS compat API) on Linux systems with the native xattr API
+ if not conf.CONFIG_SET('HAVE_GETXATTR'):
+ conf.CHECK_FUNCS_IN('''
+attr_get attr_getf attr_list attr_listf attropen attr_remove
+attr_removef attr_set attr_setf extattr_delete_fd extattr_delete_file
+extattr_get_fd extattr_get_file extattr_list_fd extattr_list_file
+extattr_set_fd extattr_set_file fgetea
+fremoveea fsetea getea listea
+removeea setea
+''', 'attr', checklibc=True, headers=xattr_headers)
+
+ if (conf.CONFIG_SET('HAVE_ATTR_LISTF') or
+ conf.CONFIG_SET('HAVE_EXTATTR_LIST_FD') or
+ conf.CONFIG_SET('HAVE_FLISTEA') or
+ conf.CONFIG_SET('HAVE_FLISTXATTR')):
+ conf.DEFINE('HAVE_XATTR_SUPPORT', 1)
+
+ # Darwin has extra options to xattr-family functions
+ conf.CHECK_CODE('getxattr(NULL, NULL, NULL, 0, 0, 0)',
+ headers=xattr_headers, local_include=False,
+ define='XATTR_ADDITIONAL_OPTIONS',
+ msg="Checking whether xattr interface takes additional options")
+
+ conf.CHECK_FUNCS_IN('dlopen dlsym dlerror dlclose', 'dl',
+ checklibc=True, headers='dlfcn.h dl.h')
+
+ conf.CHECK_C_PROTOTYPE('dlopen', 'void *dlopen(const char* filename, unsigned int flags)',
+ define='DLOPEN_TAKES_UNSIGNED_FLAGS', headers='dlfcn.h dl.h')
+
+ if conf.CHECK_FUNCS_IN('fdatasync', 'rt', checklibc=True):
+ # some systems are missing the declaration
+ conf.CHECK_DECLS('fdatasync')
+
+ if conf.CHECK_FUNCS_IN('clock_gettime', 'rt', checklibc=True):
+ for c in ['CLOCK_MONOTONIC', 'CLOCK_PROCESS_CPUTIME_ID', 'CLOCK_REALTIME']:
+ conf.CHECK_CODE('''
+ #if TIME_WITH_SYS_TIME
+ # include <sys/time.h>
+ # include <time.h>
+ #else
+ # if HAVE_SYS_TIME_H
+ # include <sys/time.h>
+ # else
+ # include <time.h>
+ # endif
+ #endif
+ clockid_t clk = %s''' % c,
+ 'HAVE_%s' % c,
+ msg='Checking whether the clock_gettime clock ID %s is available' % c)
+
+ conf.CHECK_TYPE('struct timespec', headers='sys/time.h time.h')
+
+ # these headers need to be tested as a group on freebsd
+ conf.CHECK_HEADERS(headers='sys/socket.h net/if.h', together=True)
+ conf.CHECK_HEADERS(headers='netinet/in.h arpa/nameser.h resolv.h', together=True)
+ conf.CHECK_FUNCS_IN('res_search', 'resolv', checklibc=True,
+ headers='netinet/in.h arpa/nameser.h resolv.h')
+
+
+ # try to find libintl (if --without-gettext is not given)
+ conf.env.intl_libs=''
+ if not Options.options.disable_gettext:
+ # any extra path given to look at?
+        if Options.options.gettext_location != 'None':
+            conf.env['CFLAGS'].extend(["-I%s" % Options.options.gettext_location])
+            conf.env['LDFLAGS'].extend(["-L%s" % Options.options.gettext_location])
+        else:
+            conf.env['CFLAGS'].extend(["-I/usr/local"])
+            conf.env['LDFLAGS'].extend(["-L/usr/local"])
+ conf.CHECK_HEADERS('libintl.h')
+ conf.CHECK_LIB('intl')
+ conf.CHECK_DECLS('dgettext gettext bindtextdomain textdomain bind_textdomain_codeset', headers="libintl.h")
+ # *textdomain functions are not strictly necessary
+ conf.CHECK_FUNCS_IN('bindtextdomain textdomain bind_textdomain_codeset',
+ '', checklibc=True, headers='libintl.h')
+ # gettext and dgettext must exist
+ # on some systems (the ones with glibc, those are in libc)
+ if conf.CHECK_FUNCS_IN('dgettext gettext', '', checklibc=True, headers='libintl.h'):
+ # save for dependency definitions
+ conf.env.intl_libs=''
+        # others (e.g. FreeBSD) have a separate libintl
+ elif conf.CHECK_FUNCS_IN('dgettext gettext', 'intl', checklibc=False, headers='libintl.h'):
+ # save for dependency definitions
+ conf.env.intl_libs='intl'
+ # recheck with libintl
+ conf.CHECK_FUNCS_IN('bindtextdomain textdomain bind_textdomain_codeset',
+ 'intl', checklibc=False, headers='libintl.h')
+ else:
+            # Some hosts need libiconv when linking with libintl,
+            # so we try with that flag just in case it helps.
+            oldflags = list(conf.env['EXTRA_LDFLAGS'])
+ conf.env['EXTRA_LDFLAGS'].extend(["-liconv"])
+ conf.CHECK_FUNCS_IN('dgettext gettext bindtextdomain textdomain bind_textdomain_codeset',
+ 'intl', checklibc=False, headers='libintl.h')
+ conf.env['EXTRA_LDFLAGS'] = oldflags
+ if conf.env['HAVE_GETTEXT'] and conf.env['HAVE_DGETTEXT']:
+ # save for dependency definitions
+ conf.env.intl_libs='iconv intl'
+
+ # did we find both prototypes and a library to link against?
+ # if not, unset the detected values (see Bug #9911)
+ if not (conf.env['HAVE_GETTEXT'] and conf.env['HAVE_DECL_GETTEXT']):
+ conf.undefine('HAVE_GETTEXT')
+ conf.undefine('HAVE_DECL_GETTEXT')
+ if not (conf.env['HAVE_DGETTEXT'] and conf.env['HAVE_DECL_DGETTEXT']):
+ conf.undefine('HAVE_DGETTEXT')
+ conf.undefine('HAVE_DECL_DGETTEXT')
+
+ # did the user insist on gettext (--with-gettext)?
+ if Options.options.gettext_location != 'None' and (not conf.env['HAVE_GETTEXT'] or not conf.env['HAVE_DGETTEXT']):
+ conf.fatal('library gettext not found at specified location')
+
+ conf.CHECK_FUNCS_IN('pthread_create', 'pthread', checklibc=True, headers='pthread.h')
+
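+    # Probe successive pthread library/flag combinations below; 'error'
+    # serves as a not-yet-found sentinel, so each check only runs if no
+    # earlier one succeeded.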
+ PTHREAD_CFLAGS='error'
+ PTHREAD_LDFLAGS='error'
+
+ if PTHREAD_LDFLAGS == 'error':
+ if conf.CHECK_FUNCS_IN('pthread_attr_init', 'pthread'):
+ PTHREAD_CFLAGS='-D_REENTRANT -D_POSIX_PTHREAD_SEMANTICS'
+ PTHREAD_LDFLAGS='-lpthread'
+ if PTHREAD_LDFLAGS == 'error':
+ if conf.CHECK_FUNCS_IN('pthread_attr_init', 'pthreads'):
+ PTHREAD_CFLAGS='-D_THREAD_SAFE'
+ PTHREAD_LDFLAGS='-lpthreads'
+ if PTHREAD_LDFLAGS == 'error':
+ if conf.CHECK_FUNCS_IN('pthread_attr_init', 'c_r'):
+ PTHREAD_CFLAGS='-D_THREAD_SAFE -pthread'
+ PTHREAD_LDFLAGS='-pthread'
+ if PTHREAD_LDFLAGS == 'error':
+ if conf.CHECK_FUNCS('pthread_attr_init'):
+ PTHREAD_CFLAGS='-D_REENTRANT'
+ PTHREAD_LDFLAGS='-lpthread'
+    # Especially for HP-UX, where the CHECK_FUNC macro fails to test for
+    # pthread_attr_init; testing pthread_mutex_lock works there instead.
+ if PTHREAD_LDFLAGS == 'error':
+ if conf.CHECK_FUNCS_IN('pthread_mutex_lock', 'pthread'):
+ PTHREAD_CFLAGS='-D_REENTRANT'
+ PTHREAD_LDFLAGS='-lpthread'
+
+ if PTHREAD_CFLAGS != 'error' and PTHREAD_LDFLAGS != 'error':
+ if conf.CONFIG_SET('replace_add_global_pthread'):
+ conf.ADD_CFLAGS(PTHREAD_CFLAGS)
+ conf.ADD_LDFLAGS(PTHREAD_LDFLAGS)
+ conf.CHECK_HEADERS('pthread.h')
+ conf.DEFINE('HAVE_PTHREAD', '1')
+
+ if conf.CONFIG_SET('HAVE_PTHREAD'):
+
+ conf.CHECK_DECLS('pthread_mutexattr_setrobust', headers='pthread.h')
+ if not conf.CONFIG_SET('HAVE_DECL_PTHREAD_MUTEXATTR_SETROBUST'):
+ conf.CHECK_DECLS('pthread_mutexattr_setrobust_np',
+ headers='pthread.h')
+
+ conf.CHECK_FUNCS_IN('pthread_mutexattr_setrobust', 'pthread',
+ checklibc=True, headers='pthread.h')
+ if not conf.CONFIG_SET('HAVE_PTHREAD_MUTEXATTR_SETROBUST'):
+ conf.CHECK_FUNCS_IN('pthread_mutexattr_setrobust_np', 'pthread',
+ checklibc=True, headers='pthread.h')
+
+ conf.CHECK_DECLS('pthread_mutex_consistent', headers='pthread.h')
+ if not conf.CONFIG_SET('HAVE_DECL_PTHREAD_MUTEX_CONSISTENT'):
+ conf.CHECK_DECLS('pthread_mutex_consistent_np',
+ headers='pthread.h')
+
+ conf.CHECK_FUNCS_IN('pthread_mutex_consistent', 'pthread',
+ checklibc=True, headers='pthread.h')
+ if not conf.CONFIG_SET('HAVE_PTHREAD_MUTEX_CONSISTENT'):
+ conf.CHECK_FUNCS_IN('pthread_mutex_consistent_np', 'pthread',
+ checklibc=True, headers='pthread.h')
+
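+        # Robust mutexes are only usable when both halves are present:
+        # pthread_mutexattr_setrobust to mark a mutex robust, and
+        # pthread_mutex_consistent so a surviving process can repair it
+        # after a lock returns EOWNERDEAD because the owner died.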
+ if ((conf.CONFIG_SET('HAVE_PTHREAD_MUTEXATTR_SETROBUST') or
+ conf.CONFIG_SET('HAVE_PTHREAD_MUTEXATTR_SETROBUST_NP')) and
+ (conf.CONFIG_SET('HAVE_PTHREAD_MUTEX_CONSISTENT') or
+ conf.CONFIG_SET('HAVE_PTHREAD_MUTEX_CONSISTENT_NP'))):
+ conf.DEFINE('HAVE_ROBUST_MUTEXES', 1)
+
+ conf.CHECK_FUNCS_IN('crypt', 'crypt', checklibc=True)
+
+ conf.CHECK_VARIABLE('rl_event_hook', define='HAVE_DECL_RL_EVENT_HOOK', always=True,
+ headers='readline.h readline/readline.h readline/history.h')
+
+ conf.CHECK_DECLS('snprintf vsnprintf asprintf vasprintf')
+
+ conf.CHECK_DECLS('errno', headers='errno.h', reverse=True)
+ conf.CHECK_DECLS('EWOULDBLOCK', headers='errno.h')
+ conf.CHECK_DECLS('environ getgrent_r getpwent_r', reverse=True, headers='pwd.h grp.h')
+ conf.CHECK_DECLS('pread pwrite setenv setresgid setresuid', reverse=True)
+
+ if conf.CONFIG_SET('HAVE_EPOLL_CREATE') and conf.CONFIG_SET('HAVE_SYS_EPOLL_H'):
+ conf.DEFINE('HAVE_EPOLL', 1)
+
+ if conf.CONFIG_SET('HAVE_PORT_CREATE') and conf.CONFIG_SET('HAVE_PORT_H'):
+ conf.DEFINE('HAVE_SOLARIS_PORTS', 1)
+
+ conf.CHECK_HEADERS('poll.h')
+ conf.CHECK_FUNCS('poll')
+
+ conf.CHECK_FUNCS('strptime')
+ conf.CHECK_DECLS('strptime', headers='time.h')
+ conf.CHECK_CODE('''#define LIBREPLACE_CONFIGURE_TEST_STRPTIME
+ #include "test/strptime.c"''',
+ define='HAVE_WORKING_STRPTIME',
+ execute=True,
+ addmain=False,
+ msg='Checking for working strptime')
+
+ conf.CHECK_CODE('gettimeofday(NULL, NULL)', 'HAVE_GETTIMEOFDAY_TZ', execute=False)
+
+ conf.CHECK_CODE('#include "test/snprintf.c"',
+ define="HAVE_C99_VSNPRINTF",
+ execute=True,
+ addmain=False,
+ msg="Checking for C99 vsnprintf")
+
+ conf.CHECK_CODE('#include "test/shared_mmap.c"',
+ addmain=False, add_headers=False, execute=True,
+ define='HAVE_SHARED_MMAP',
+ msg="Checking for HAVE_SHARED_MMAP")
+
+ conf.CHECK_CODE('#include "test/shared_mremap.c"',
+ addmain=False, add_headers=False, execute=True,
+ define='HAVE_MREMAP',
+ msg="Checking for HAVE_MREMAP")
+
+ # OpenBSD (and I've heard HPUX) doesn't sync between mmap and write.
+ # FIXME: Anything other than a 0 or 1 exit code should abort configure!
+ conf.CHECK_CODE('#include "test/incoherent_mmap.c"',
+ addmain=False, add_headers=False, execute=True,
+ define='HAVE_INCOHERENT_MMAP',
+ msg="Checking for HAVE_INCOHERENT_MMAP")
+
+ conf.SAMBA_BUILD_ENV()
+
+ conf.CHECK_CODE('''
+ typedef struct {unsigned x;} FOOBAR;
+ #define X_FOOBAR(x) ((FOOBAR) { x })
+ #define FOO_ONE X_FOOBAR(1)
+ FOOBAR f = FOO_ONE;
+ static const struct {
+ FOOBAR y;
+ } f2[] = {
+ {FOO_ONE}
+ };
+ static const FOOBAR f3[] = {FOO_ONE};
+ ''',
+ define='HAVE_IMMEDIATE_STRUCTURES')
+
+ conf.CHECK_CODE('mkdir("foo",0777)', define='HAVE_MKDIR_MODE', headers='sys/stat.h')
+
+ conf.CHECK_STRUCTURE_MEMBER('struct stat', 'st_mtim.tv_nsec', define='HAVE_STAT_TV_NSEC',
+ headers='sys/stat.h')
+ # we need the st_rdev test under two names
+ conf.CHECK_STRUCTURE_MEMBER('struct stat', 'st_rdev',
+ define='HAVE_STRUCT_STAT_ST_RDEV',
+ headers='sys/stat.h')
+ conf.CHECK_STRUCTURE_MEMBER('struct stat', 'st_rdev', define='HAVE_ST_RDEV',
+ headers='sys/stat.h')
+ conf.CHECK_STRUCTURE_MEMBER('struct sockaddr_storage', 'ss_family',
+ headers='sys/socket.h netinet/in.h')
+ conf.CHECK_STRUCTURE_MEMBER('struct sockaddr_storage', '__ss_family',
+ headers='sys/socket.h netinet/in.h')
+
+
+ if conf.CHECK_STRUCTURE_MEMBER('struct sockaddr', 'sa_len',
+ headers='sys/socket.h netinet/in.h',
+ define='HAVE_SOCKADDR_SA_LEN'):
+ # the old build system produced both defines
+ conf.DEFINE('HAVE_STRUCT_SOCKADDR_SA_LEN', 1)
+
+ conf.CHECK_STRUCTURE_MEMBER('struct sockaddr_in', 'sin_len',
+ headers='sys/socket.h netinet/in.h',
+ define='HAVE_SOCK_SIN_LEN')
+
+ conf.CHECK_CODE('struct sockaddr_un sunaddr; sunaddr.sun_family = AF_UNIX;',
+ define='HAVE_UNIXSOCKET', headers='sys/socket.h sys/un.h')
+
+
+ conf.CHECK_CODE('''
+ struct stat st;
+ char tpl[20]="/tmp/test.XXXXXX";
+ char tpl2[20]="/tmp/test.XXXXXX";
+ int fd = mkstemp(tpl);
+ int fd2 = mkstemp(tpl2);
+ if (fd == -1) {
+ if (fd2 != -1) {
+ unlink(tpl2);
+ }
+ exit(1);
+ }
+ if (fd2 == -1) exit(1);
+ unlink(tpl);
+ unlink(tpl2);
+ if (fstat(fd, &st) != 0) exit(1);
+ if ((st.st_mode & 0777) != 0600) exit(1);
+ if (strcmp(tpl, "/tmp/test.XXXXXX") == 0) {
+ exit(1);
+ }
+ if (strcmp(tpl, tpl2) == 0) {
+ exit(1);
+ }
+ exit(0);
+ ''',
+ define='HAVE_SECURE_MKSTEMP',
+ execute=True,
+        mandatory=True) # let's see if we get a mandatory failure for this one
+
+ # look for a method of finding the list of network interfaces
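+    # (tried in order of preference; the first test program that builds and
+    # runs successfully wins)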
+ for method in ['HAVE_IFACE_GETIFADDRS', 'HAVE_IFACE_AIX', 'HAVE_IFACE_IFCONF', 'HAVE_IFACE_IFREQ']:
+ if conf.CHECK_CODE('''
+ #define %s 1
+ #define NO_CONFIG_H 1
+ #define AUTOCONF_TEST 1
+ #include "replace.c"
+ #include "inet_ntop.c"
+ #include "snprintf.c"
+ #include "getifaddrs.c"
+ #define getifaddrs_test main
+ #include "test/getifaddrs.c"
+ ''' % method,
+ method,
+ lib='nsl socket',
+ addmain=False,
+ execute=True):
+ break
+
+ conf.RECURSE('system')
+ conf.SAMBA_CONFIG_H()
+
+
+REPLACEMENT_FUNCTIONS = {
+ 'replace.c': ['ftruncate', 'strlcpy', 'strlcat', 'mktime', 'initgroups',
+ 'memmove', 'strdup', 'setlinebuf', 'vsyslog', 'strnlen',
+ 'strndup', 'waitpid', 'seteuid', 'setegid', 'chroot',
+ 'mkstemp', 'mkdtemp', 'pread', 'pwrite', 'strcasestr',
+ 'strtok_r', 'strtoll', 'strtoull', 'setenv', 'unsetenv',
+ 'utime', 'utimes', 'dup2', 'chown', 'link', 'readlink',
+ 'symlink', 'lchown', 'realpath', 'memmem', 'vdprintf',
+ 'dprintf', 'get_current_dir_name',
+ 'strerror_r', 'clock_gettime'],
+ 'timegm.c': ['timegm'],
+ # Note: C99_VSNPRINTF is not a function, but a special condition
+ # for replacement
+ 'snprintf.c': ['C99_VSNPRINTF', 'snprintf', 'vsnprintf', 'asprintf', 'vasprintf'],
+ # Note: WORKING_STRPTIME is not a function, but a special condition
+ # for replacement
+ 'strptime.c': ['WORKING_STRPTIME', 'strptime'],
+ }
+
+
+def build(bld):
+ bld.RECURSE('buildtools/wafsamba')
+
+ REPLACE_HOSTCC_SOURCE = ''
+
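+    # Pull in a replacement source file once if any function (or special
+    # condition) it provides is missing; the break avoids adding the same
+    # file twice.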
+ for filename, functions in REPLACEMENT_FUNCTIONS.iteritems():
+ for function in functions:
+ if not bld.CONFIG_SET('HAVE_%s' % function.upper()):
+ REPLACE_HOSTCC_SOURCE += ' %s' % filename
+ break
+
+ extra_libs = ''
+ if bld.CONFIG_SET('HAVE_LIBBSD'): extra_libs += ' bsd'
+
+ bld.SAMBA_SUBSYSTEM('LIBREPLACE_HOSTCC',
+ REPLACE_HOSTCC_SOURCE,
+ use_hostcc=True,
+ use_global_deps=False,
+ cflags='-D_SAMBA_HOSTCC_',
+ group='compiler_libraries',
+ deps = extra_libs
+ )
+
+ REPLACE_SOURCE = REPLACE_HOSTCC_SOURCE
+ REPLACE_SOURCE += ' cwrap.c'
+
+ if not bld.CONFIG_SET('HAVE_CRYPT'): REPLACE_SOURCE += ' crypt.c'
+ if not bld.CONFIG_SET('HAVE_DLOPEN'): REPLACE_SOURCE += ' dlfcn.c'
+ if not bld.CONFIG_SET('HAVE_POLL'): REPLACE_SOURCE += ' poll.c'
+
+ if not bld.CONFIG_SET('HAVE_SOCKETPAIR'): REPLACE_SOURCE += ' socketpair.c'
+ if not bld.CONFIG_SET('HAVE_CONNECT'): REPLACE_SOURCE += ' socket.c'
+ if not bld.CONFIG_SET('HAVE_GETIFADDRS'): REPLACE_SOURCE += ' getifaddrs.c'
+ if not bld.CONFIG_SET('HAVE_GETADDRINFO'): REPLACE_SOURCE += ' getaddrinfo.c'
+ if not bld.CONFIG_SET('HAVE_INET_NTOA'): REPLACE_SOURCE += ' inet_ntoa.c'
+ if not bld.CONFIG_SET('HAVE_INET_ATON'): REPLACE_SOURCE += ' inet_aton.c'
+ if not bld.CONFIG_SET('HAVE_INET_NTOP'): REPLACE_SOURCE += ' inet_ntop.c'
+ if not bld.CONFIG_SET('HAVE_INET_PTON'): REPLACE_SOURCE += ' inet_pton.c'
+ if not bld.CONFIG_SET('HAVE_GETXATTR') or bld.CONFIG_SET('XATTR_ADDITIONAL_OPTIONS'):
+ REPLACE_SOURCE += ' xattr.c'
+
+ bld.SAMBA_LIBRARY('replace',
+ source=REPLACE_SOURCE,
+ group='base_libraries',
+ # FIXME: Ideally symbols should be hidden here so they
+ # don't appear in the global namespace when Samba
+ # libraries are loaded, but this doesn't appear to work
+ # at the moment:
+ # hide_symbols=bld.BUILTIN_LIBRARY('replace'),
+ private_library=True,
+ deps='crypt dl nsl socket rt attr' + extra_libs)
+
+ bld.SAMBA_SUBSYSTEM('replace-test',
+ source='''test/testsuite.c test/strptime.c
+ test/os2_delete.c test/getifaddrs.c''',
+ deps='replace')
+
+ if bld.env.standalone_replace:
+ bld.SAMBA_BINARY('replace_testsuite',
+ source='test/main.c',
+ deps='replace replace-test',
+ install=False)
+
+ # build replacements for stdint.h and stdbool.h if needed
+ bld.SAMBA_GENERATOR('replace_stdint_h',
+ rule='cp ${SRC} ${TGT}',
+ source='hdr_replace.h',
+ target='stdint.h',
+ enabled = not bld.CONFIG_SET('HAVE_STDINT_H'))
+ bld.SAMBA_GENERATOR('replace_stdbool_h',
+ rule='cp ${SRC} ${TGT}',
+ source='hdr_replace.h',
+ target='stdbool.h',
+ enabled = not bld.CONFIG_SET('HAVE_STDBOOL_H'))
+
+    bld.SAMBA_SUBSYSTEM('samba_intl', source='', use_global_deps=False, deps=bld.env.intl_libs)
+
+def dist():
+ '''makes a tarball for distribution'''
+ samba_dist.dist()
diff --git a/lib/replace/xattr.c b/lib/replace/xattr.c
new file mode 100644
index 0000000..ce52d1a
--- /dev/null
+++ b/lib/replace/xattr.c
@@ -0,0 +1,734 @@
+/*
+ Unix SMB/CIFS implementation.
+ replacement routines for xattr implementations
+ Copyright (C) Jeremy Allison 1998-2005
+ Copyright (C) Timur Bakeyev 2005
+ Copyright (C) Bjoern Jacke 2006-2007
+ Copyright (C) Herb Lewis 2003
+ Copyright (C) Andrew Bartlett 2012
+
+ ** NOTE! The following LGPL license applies to the replace
+ ** library. This does NOT imply that all of Samba is released
+ ** under the LGPL
+
+ This library is free software; you can redistribute it and/or
+ modify it under the terms of the GNU Lesser General Public
+ License as published by the Free Software Foundation; either
+ version 3 of the License, or (at your option) any later version.
+
+ This library is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ Lesser General Public License for more details.
+
+ You should have received a copy of the GNU Lesser General Public
+ License along with this library; if not, see <http://www.gnu.org/licenses/>.
+*/
+
+#define UID_WRAPPER_NOT_REPLACE
+#include "replace.h"
+#include "system/filesys.h"
+#include "system/dir.h"
+
+/******** Solaris EA helper function prototypes ********/
+#ifdef HAVE_ATTROPEN
+#define SOLARIS_ATTRMODE S_IRUSR|S_IWUSR|S_IRGRP|S_IWGRP
+static int solaris_write_xattr(int attrfd, const char *value, size_t size);
+static ssize_t solaris_read_xattr(int attrfd, void *value, size_t size);
+static ssize_t solaris_list_xattr(int attrdirfd, char *list, size_t size);
+static int solaris_unlinkat(int attrdirfd, const char *name);
+static int solaris_attropen(const char *path, const char *attrpath, int oflag, mode_t mode);
+static int solaris_openat(int fildes, const char *path, int oflag, mode_t mode);
+#endif
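+
+/* On Solaris, extended attributes live as files inside a per-file hidden
+   attribute directory: attropen()/openat(..., O_XATTR) return descriptors
+   into that directory, so listing attributes is a readdir() and removing
+   one is an unlinkat(), as the helpers below illustrate. */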
+
+/**************************************************************************
+ Wrappers for extended attribute calls. Based on the Linux package, with
+ support for IRIX and (Net|Free)BSD as well. Expand as other systems gain them.
+****************************************************************************/
+
+ssize_t rep_getxattr (const char *path, const char *name, void *value, size_t size)
+{
+#if defined(HAVE_GETXATTR)
+#ifndef XATTR_ADDITIONAL_OPTIONS
+ return getxattr(path, name, value, size);
+#else
+
+/* So that we do not recursively call this function */
+#undef getxattr
+ int options = 0;
+ return getxattr(path, name, value, size, 0, options);
+#endif
+#elif defined(HAVE_GETEA)
+ return getea(path, name, value, size);
+#elif defined(HAVE_EXTATTR_GET_FILE)
+ char *s;
+ ssize_t retval;
+ int attrnamespace = (strncmp(name, "system", 6) == 0) ?
+ EXTATTR_NAMESPACE_SYSTEM : EXTATTR_NAMESPACE_USER;
+ const char *attrname = ((s=strchr(name, '.')) == NULL) ? name : s + 1;
+ /*
+ * The BSD implementation has a nasty habit of silently truncating
+ * the returned value to the size of the buffer, so we have to check
+ * that the buffer is large enough to fit the returned value.
+ */
+ if((retval=extattr_get_file(path, attrnamespace, attrname, NULL, 0)) >= 0) {
+ if (size == 0) {
+ return retval;
+ } else if (retval > size) {
+ errno = ERANGE;
+ return -1;
+ }
+ if((retval=extattr_get_file(path, attrnamespace, attrname, value, size)) >= 0)
+ return retval;
+ }
+
+ return -1;
+#elif defined(HAVE_ATTR_GET)
+ int retval, flags = 0;
+ int valuelength = (int)size;
+ char *attrname = strchr(name,'.') + 1;
+
+ if (strncmp(name, "system", 6) == 0) flags |= ATTR_ROOT;
+
+ retval = attr_get(path, attrname, (char *)value, &valuelength, flags);
+ if (size == 0 && retval == -1 && errno == E2BIG) {
+ return valuelength;
+ }
+
+ return retval ? retval : valuelength;
+#elif defined(HAVE_ATTROPEN)
+ ssize_t ret = -1;
+ int attrfd = solaris_attropen(path, name, O_RDONLY, 0);
+ if (attrfd >= 0) {
+ ret = solaris_read_xattr(attrfd, value, size);
+ close(attrfd);
+ }
+ return ret;
+#else
+ errno = ENOSYS;
+ return -1;
+#endif
+}
+
+ssize_t rep_fgetxattr (int filedes, const char *name, void *value, size_t size)
+{
+#if defined(HAVE_FGETXATTR)
+#ifndef XATTR_ADDITIONAL_OPTIONS
+ return fgetxattr(filedes, name, value, size);
+#else
+
+/* So that we do not recursively call this function */
+#undef fgetxattr
+ int options = 0;
+ return fgetxattr(filedes, name, value, size, 0, options);
+#endif
+#elif defined(HAVE_FGETEA)
+ return fgetea(filedes, name, value, size);
+#elif defined(HAVE_EXTATTR_GET_FD)
+ char *s;
+ ssize_t retval;
+ int attrnamespace = (strncmp(name, "system", 6) == 0) ?
+ EXTATTR_NAMESPACE_SYSTEM : EXTATTR_NAMESPACE_USER;
+ const char *attrname = ((s=strchr(name, '.')) == NULL) ? name : s + 1;
+
+ if((retval=extattr_get_fd(filedes, attrnamespace, attrname, NULL, 0)) >= 0) {
+ if (size == 0) {
+ return retval;
+ } else if (retval > size) {
+ errno = ERANGE;
+ return -1;
+ }
+ if((retval=extattr_get_fd(filedes, attrnamespace, attrname, value, size)) >= 0)
+ return retval;
+ }
+
+ return -1;
+#elif defined(HAVE_ATTR_GETF)
+ int retval, flags = 0;
+ int valuelength = (int)size;
+ char *attrname = strchr(name,'.') + 1;
+
+ if (strncmp(name, "system", 6) == 0) flags |= ATTR_ROOT;
+
+ retval = attr_getf(filedes, attrname, (char *)value, &valuelength, flags);
+ if (size == 0 && retval == -1 && errno == E2BIG) {
+ return valuelength;
+ }
+ return retval ? retval : valuelength;
+#elif defined(HAVE_ATTROPEN)
+ ssize_t ret = -1;
+ int attrfd = solaris_openat(filedes, name, O_RDONLY|O_XATTR, 0);
+ if (attrfd >= 0) {
+ ret = solaris_read_xattr(attrfd, value, size);
+ close(attrfd);
+ }
+ return ret;
+#else
+ errno = ENOSYS;
+ return -1;
+#endif
+}
+
+#if defined(HAVE_EXTATTR_LIST_FILE)
+
+#define EXTATTR_PREFIX(s) (s), (sizeof((s))-1)
+
+static struct {
+ int space;
+ const char *name;
+ size_t len;
+}
+extattr[] = {
+ { EXTATTR_NAMESPACE_SYSTEM, EXTATTR_PREFIX("system.") },
+ { EXTATTR_NAMESPACE_USER, EXTATTR_PREFIX("user.") },
+};
+
+typedef union {
+ const char *path;
+ int filedes;
+} extattr_arg;
+
+static ssize_t bsd_attr_list (int type, extattr_arg arg, char *list, size_t size)
+{
+ ssize_t list_size, total_size = 0;
+ int i, t, len;
+ char *buf;
+ /* Iterate through extattr(2) namespaces */
+ for(t = 0; t < ARRAY_SIZE(extattr); t++) {
+ if (t != EXTATTR_NAMESPACE_USER && geteuid() != 0) {
+ /* ignore all but user namespace when we are not root, see bug 10247 */
+ continue;
+ }
+ switch(type) {
+#if defined(HAVE_EXTATTR_LIST_FILE)
+ case 0:
+ list_size = extattr_list_file(arg.path, extattr[t].space, list, size);
+ break;
+#endif
+#if defined(HAVE_EXTATTR_LIST_LINK)
+ case 1:
+ list_size = extattr_list_link(arg.path, extattr[t].space, list, size);
+ break;
+#endif
+#if defined(HAVE_EXTATTR_LIST_FD)
+ case 2:
+ list_size = extattr_list_fd(arg.filedes, extattr[t].space, list, size);
+ break;
+#endif
+ default:
+ errno = ENOSYS;
+ return -1;
+ }
+		/* Some error happened. errno should be set by the previous call */
+ if(list_size < 0)
+ return -1;
+ /* No attributes */
+ if(list_size == 0)
+ continue;
+ /* XXX: Call with an empty buffer may be used to calculate
+		   necessary buffer size. Unfortunately, we can't tell how
+		   many attributes were returned, which is the potential
+		   problem with this emulation.
+ */
+ if(list == NULL) {
+			/* Take the worst case of one-char attribute names -
+			   two bytes per name plus one more for sanity.
+			*/
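+			/* E.g. a 6-byte result could hold at most three
+			   one-char entries, so for the "user." namespace we
+			   would reserve 6 + (6/2 + 1)*5 = 26 bytes. */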
+ total_size += list_size + (list_size/2 + 1)*extattr[t].len;
+ continue;
+ }
+ /* Count necessary offset to fit namespace prefixes */
+ len = 0;
+ for(i = 0; i < list_size; i += list[i] + 1)
+ len += extattr[t].len;
+
+ total_size += list_size + len;
+ /* Buffer is too small to fit the results */
+ if(total_size > size) {
+ errno = ERANGE;
+ return -1;
+ }
+ /* Shift results back, so we can prepend prefixes */
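+		/* The raw extattr list is length-prefixed ("\3foo\2ab"); the
+		   loop below rewrites it in place as NUL-terminated names with
+		   the namespace prepended ("user.foo\0user.ab\0"). */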
+ buf = (char *)memmove(list + len, list, list_size);
+
+ for(i = 0; i < list_size; i += len + 1) {
+ len = buf[i];
+ strncpy(list, extattr[t].name, extattr[t].len + 1);
+ list += extattr[t].len;
+ strncpy(list, buf + i + 1, len);
+ list[len] = '\0';
+ list += len + 1;
+ }
+ size -= total_size;
+ }
+ return total_size;
+}
+
+#endif
+
+#if defined(HAVE_ATTR_LIST) && (defined(HAVE_SYS_ATTRIBUTES_H) || defined(HAVE_ATTR_ATTRIBUTES_H))
+static char attr_buffer[ATTR_MAX_VALUELEN];
+
+static ssize_t irix_attr_list(const char *path, int filedes, char *list, size_t size, int flags)
+{
+ int retval = 0, index;
+ attrlist_cursor_t *cursor = 0;
+ int total_size = 0;
+ attrlist_t * al = (attrlist_t *)attr_buffer;
+ attrlist_ent_t *ae;
+ size_t ent_size, left = size;
+ char *bp = list;
+
+ while (true) {
+ if (filedes)
+ retval = attr_listf(filedes, attr_buffer, ATTR_MAX_VALUELEN, flags, cursor);
+ else
+ retval = attr_list(path, attr_buffer, ATTR_MAX_VALUELEN, flags, cursor);
+ if (retval) break;
+ for (index = 0; index < al->al_count; index++) {
+ ae = ATTR_ENTRY(attr_buffer, index);
+ ent_size = strlen(ae->a_name) + sizeof("user.");
+ if (left >= ent_size) {
+ strncpy(bp, "user.", sizeof("user."));
+ strncat(bp, ae->a_name, ent_size - sizeof("user."));
+ bp += ent_size;
+ left -= ent_size;
+ } else if (size) {
+ errno = ERANGE;
+ retval = -1;
+ break;
+ }
+ total_size += ent_size;
+ }
+ if (al->al_more == 0) break;
+ }
+ if (retval == 0) {
+ flags |= ATTR_ROOT;
+ cursor = 0;
+ while (true) {
+ if (filedes)
+ retval = attr_listf(filedes, attr_buffer, ATTR_MAX_VALUELEN, flags, cursor);
+ else
+ retval = attr_list(path, attr_buffer, ATTR_MAX_VALUELEN, flags, cursor);
+ if (retval) break;
+ for (index = 0; index < al->al_count; index++) {
+ ae = ATTR_ENTRY(attr_buffer, index);
+ ent_size = strlen(ae->a_name) + sizeof("system.");
+ if (left >= ent_size) {
+ strncpy(bp, "system.", sizeof("system."));
+ strncat(bp, ae->a_name, ent_size - sizeof("system."));
+ bp += ent_size;
+ left -= ent_size;
+ } else if (size) {
+ errno = ERANGE;
+ retval = -1;
+ break;
+ }
+ total_size += ent_size;
+ }
+ if (al->al_more == 0) break;
+ }
+ }
+ return (ssize_t)(retval ? retval : total_size);
+}
+
+#endif
+
+ssize_t rep_listxattr (const char *path, char *list, size_t size)
+{
+#if defined(HAVE_LISTXATTR)
+#ifndef XATTR_ADDITIONAL_OPTIONS
+ return listxattr(path, list, size);
+#else
+/* So that we do not recursively call this function */
+#undef listxattr
+ int options = 0;
+ return listxattr(path, list, size, options);
+#endif
+#elif defined(HAVE_LISTEA)
+ return listea(path, list, size);
+#elif defined(HAVE_EXTATTR_LIST_FILE)
+ extattr_arg arg;
+ arg.path = path;
+ return bsd_attr_list(0, arg, list, size);
+#elif defined(HAVE_ATTR_LIST) && defined(HAVE_SYS_ATTRIBUTES_H)
+ return irix_attr_list(path, 0, list, size, 0);
+#elif defined(HAVE_ATTROPEN)
+ ssize_t ret = -1;
+ int attrdirfd = solaris_attropen(path, ".", O_RDONLY, 0);
+ if (attrdirfd >= 0) {
+ ret = solaris_list_xattr(attrdirfd, list, size);
+ close(attrdirfd);
+ }
+ return ret;
+#else
+ errno = ENOSYS;
+ return -1;
+#endif
+}
+
+ssize_t rep_flistxattr (int filedes, char *list, size_t size)
+{
+#if defined(HAVE_FLISTXATTR)
+#ifndef XATTR_ADDITIONAL_OPTIONS
+ return flistxattr(filedes, list, size);
+#else
+/* So that we do not recursively call this function */
+#undef flistxattr
+ int options = 0;
+ return flistxattr(filedes, list, size, options);
+#endif
+#elif defined(HAVE_FLISTEA)
+ return flistea(filedes, list, size);
+#elif defined(HAVE_EXTATTR_LIST_FD)
+ extattr_arg arg;
+ arg.filedes = filedes;
+ return bsd_attr_list(2, arg, list, size);
+#elif defined(HAVE_ATTR_LISTF)
+ return irix_attr_list(NULL, filedes, list, size, 0);
+#elif defined(HAVE_ATTROPEN)
+ ssize_t ret = -1;
+ int attrdirfd = solaris_openat(filedes, ".", O_RDONLY|O_XATTR, 0);
+ if (attrdirfd >= 0) {
+ ret = solaris_list_xattr(attrdirfd, list, size);
+ close(attrdirfd);
+ }
+ return ret;
+#else
+ errno = ENOSYS;
+ return -1;
+#endif
+}
+
+int rep_removexattr (const char *path, const char *name)
+{
+#if defined(HAVE_REMOVEXATTR)
+#ifndef XATTR_ADDITIONAL_OPTIONS
+ return removexattr(path, name);
+#else
+/* So that we do not recursively call this function */
+#undef removexattr
+ int options = 0;
+ return removexattr(path, name, options);
+#endif
+#elif defined(HAVE_REMOVEEA)
+ return removeea(path, name);
+#elif defined(HAVE_EXTATTR_DELETE_FILE)
+ char *s;
+ int attrnamespace = (strncmp(name, "system", 6) == 0) ?
+ EXTATTR_NAMESPACE_SYSTEM : EXTATTR_NAMESPACE_USER;
+ const char *attrname = ((s=strchr(name, '.')) == NULL) ? name : s + 1;
+
+ return extattr_delete_file(path, attrnamespace, attrname);
+#elif defined(HAVE_ATTR_REMOVE)
+ int flags = 0;
+ char *attrname = strchr(name,'.') + 1;
+
+ if (strncmp(name, "system", 6) == 0) flags |= ATTR_ROOT;
+
+ return attr_remove(path, attrname, flags);
+#elif defined(HAVE_ATTROPEN)
+ int ret = -1;
+ int attrdirfd = solaris_attropen(path, ".", O_RDONLY, 0);
+ if (attrdirfd >= 0) {
+ ret = solaris_unlinkat(attrdirfd, name);
+ close(attrdirfd);
+ }
+ return ret;
+#else
+ errno = ENOSYS;
+ return -1;
+#endif
+}
+
+int rep_fremovexattr (int filedes, const char *name)
+{
+#if defined(HAVE_FREMOVEXATTR)
+#ifndef XATTR_ADDITIONAL_OPTIONS
+ return fremovexattr(filedes, name);
+#else
+/* So that we do not recursively call this function */
+#undef fremovexattr
+ int options = 0;
+ return fremovexattr(filedes, name, options);
+#endif
+#elif defined(HAVE_FREMOVEEA)
+ return fremoveea(filedes, name);
+#elif defined(HAVE_EXTATTR_DELETE_FD)
+ char *s;
+ int attrnamespace = (strncmp(name, "system", 6) == 0) ?
+ EXTATTR_NAMESPACE_SYSTEM : EXTATTR_NAMESPACE_USER;
+ const char *attrname = ((s=strchr(name, '.')) == NULL) ? name : s + 1;
+
+ return extattr_delete_fd(filedes, attrnamespace, attrname);
+#elif defined(HAVE_ATTR_REMOVEF)
+ int flags = 0;
+ char *attrname = strchr(name,'.') + 1;
+
+ if (strncmp(name, "system", 6) == 0) flags |= ATTR_ROOT;
+
+ return attr_removef(filedes, attrname, flags);
+#elif defined(HAVE_ATTROPEN)
+ int ret = -1;
+ int attrdirfd = solaris_openat(filedes, ".", O_RDONLY|O_XATTR, 0);
+ if (attrdirfd >= 0) {
+ ret = solaris_unlinkat(attrdirfd, name);
+ close(attrdirfd);
+ }
+ return ret;
+#else
+ errno = ENOSYS;
+ return -1;
+#endif
+}
+
+int rep_setxattr (const char *path, const char *name, const void *value, size_t size, int flags)
+{
+#if defined(HAVE_SETXATTR)
+#ifndef XATTR_ADDITIONAL_OPTIONS
+ return setxattr(path, name, value, size, flags);
+#else
+/* So that we do not recursively call this function */
+#undef setxattr
+ int options = 0;
+ return setxattr(path, name, value, size, 0, options);
+#endif
+#elif defined(HAVE_SETEA)
+ return setea(path, name, value, size, flags);
+#elif defined(HAVE_EXTATTR_SET_FILE)
+ char *s;
+ int retval = 0;
+ int attrnamespace = (strncmp(name, "system", 6) == 0) ?
+ EXTATTR_NAMESPACE_SYSTEM : EXTATTR_NAMESPACE_USER;
+ const char *attrname = ((s=strchr(name, '.')) == NULL) ? name : s + 1;
+ if (flags) {
+ /* Check attribute existence */
+ retval = extattr_get_file(path, attrnamespace, attrname, NULL, 0);
+ if (retval < 0) {
+ /* REPLACE attribute, that doesn't exist */
+ if (flags & XATTR_REPLACE && errno == ENOATTR) {
+ errno = ENOATTR;
+ return -1;
+ }
+ /* Ignore other errors */
+ }
+ else {
+ /* CREATE attribute, that already exists */
+ if (flags & XATTR_CREATE) {
+ errno = EEXIST;
+ return -1;
+ }
+ }
+ }
+ retval = extattr_set_file(path, attrnamespace, attrname, value, size);
+ return (retval < 0) ? -1 : 0;
+#elif defined(HAVE_ATTR_SET)
+ int myflags = 0;
+ char *attrname = strchr(name,'.') + 1;
+
+ if (strncmp(name, "system", 6) == 0) myflags |= ATTR_ROOT;
+ if (flags & XATTR_CREATE) myflags |= ATTR_CREATE;
+ if (flags & XATTR_REPLACE) myflags |= ATTR_REPLACE;
+
+ return attr_set(path, attrname, (const char *)value, size, myflags);
+#elif defined(HAVE_ATTROPEN)
+ int ret = -1;
+ int myflags = O_RDWR;
+ int attrfd;
+ if (flags & XATTR_CREATE) myflags |= O_EXCL;
+ if (!(flags & XATTR_REPLACE)) myflags |= O_CREAT;
+ attrfd = solaris_attropen(path, name, myflags, (mode_t) SOLARIS_ATTRMODE);
+ if (attrfd >= 0) {
+ ret = solaris_write_xattr(attrfd, value, size);
+ close(attrfd);
+ }
+ return ret;
+#else
+ errno = ENOSYS;
+ return -1;
+#endif
+}
+
+int rep_fsetxattr (int filedes, const char *name, const void *value, size_t size, int flags)
+{
+#if defined(HAVE_FSETXATTR)
+#ifndef XATTR_ADDITIONAL_OPTIONS
+ return fsetxattr(filedes, name, value, size, flags);
+#else
+/* So that we do not recursively call this function */
+#undef fsetxattr
+ int options = 0;
+ return fsetxattr(filedes, name, value, size, 0, options);
+#endif
+#elif defined(HAVE_FSETEA)
+ return fsetea(filedes, name, value, size, flags);
+#elif defined(HAVE_EXTATTR_SET_FD)
+ char *s;
+ int retval = 0;
+ int attrnamespace = (strncmp(name, "system", 6) == 0) ?
+ EXTATTR_NAMESPACE_SYSTEM : EXTATTR_NAMESPACE_USER;
+ const char *attrname = ((s=strchr(name, '.')) == NULL) ? name : s + 1;
+ if (flags) {
+ /* Check attribute existence */
+ retval = extattr_get_fd(filedes, attrnamespace, attrname, NULL, 0);
+ if (retval < 0) {
+ /* REPLACE attribute, that doesn't exist */
+ if (flags & XATTR_REPLACE && errno == ENOATTR) {
+ errno = ENOATTR;
+ return -1;
+ }
+ /* Ignore other errors */
+ }
+ else {
+ /* CREATE attribute, that already exists */
+ if (flags & XATTR_CREATE) {
+ errno = EEXIST;
+ return -1;
+ }
+ }
+ }
+ retval = extattr_set_fd(filedes, attrnamespace, attrname, value, size);
+ return (retval < 0) ? -1 : 0;
+#elif defined(HAVE_ATTR_SETF)
+ int myflags = 0;
+ char *attrname = strchr(name,'.') + 1;
+
+ if (strncmp(name, "system", 6) == 0) myflags |= ATTR_ROOT;
+ if (flags & XATTR_CREATE) myflags |= ATTR_CREATE;
+ if (flags & XATTR_REPLACE) myflags |= ATTR_REPLACE;
+
+ return attr_setf(filedes, attrname, (const char *)value, size, myflags);
+#elif defined(HAVE_ATTROPEN)
+ int ret = -1;
+ int myflags = O_RDWR | O_XATTR;
+ int attrfd;
+ if (flags & XATTR_CREATE) myflags |= O_EXCL;
+ if (!(flags & XATTR_REPLACE)) myflags |= O_CREAT;
+ attrfd = solaris_openat(filedes, name, myflags, (mode_t) SOLARIS_ATTRMODE);
+ if (attrfd >= 0) {
+ ret = solaris_write_xattr(attrfd, value, size);
+ close(attrfd);
+ }
+ return ret;
+#else
+ errno = ENOSYS;
+ return -1;
+#endif
+}
+
+/**************************************************************************
+ helper functions for Solaris' EA support
+****************************************************************************/
+#ifdef HAVE_ATTROPEN
+static ssize_t solaris_read_xattr(int attrfd, void *value, size_t size)
+{
+ struct stat sbuf;
+
+ if (fstat(attrfd, &sbuf) == -1) {
+ errno = ENOATTR;
+ return -1;
+ }
+
+ /* This is to return the current size of the named extended attribute */
+ if (size == 0) {
+ return sbuf.st_size;
+ }
+
+ /* check size and read xattr */
+ if (sbuf.st_size > size) {
+ errno = ERANGE;
+ return -1;
+ }
+
+ return read(attrfd, value, sbuf.st_size);
+}
+
+static ssize_t solaris_list_xattr(int attrdirfd, char *list, size_t size)
+{
+ ssize_t len = 0;
+ DIR *dirp;
+ struct dirent *de;
+ int newfd = dup(attrdirfd);
+	/* CAUTION: The originating file descriptor should not be
+	   used again following the call to fdopendir().
+	   For that reason we dup() the file descriptor
+	   here to make things clearer. */
+ dirp = fdopendir(newfd);
+
+ while ((de = readdir(dirp))) {
+ size_t listlen = strlen(de->d_name) + 1;
+ if (!strcmp(de->d_name, ".") || !strcmp(de->d_name, "..")) {
+ /* we don't want "." and ".." here: */
+ continue;
+ }
+
+ if (size == 0) {
+			/* return the current size of the list of extended attribute names */
+ len += listlen;
+ } else {
+			/* check size and copy entries + nul into list. */
+ if ((len + listlen) > size) {
+ errno = ERANGE;
+ len = -1;
+ break;
+ } else {
+ strlcpy(list + len, de->d_name, listlen);
+ len += listlen;
+ }
+ }
+ }
+
+ if (closedir(dirp) == -1) {
+ return -1;
+ }
+ return len;
+}
+
+static int solaris_unlinkat(int attrdirfd, const char *name)
+{
+ if (unlinkat(attrdirfd, name, 0) == -1) {
+ if (errno == ENOENT) {
+ errno = ENOATTR;
+ }
+ return -1;
+ }
+ return 0;
+}
+
+static int solaris_attropen(const char *path, const char *attrpath, int oflag, mode_t mode)
+{
+ int filedes = attropen(path, attrpath, oflag, mode);
+ if (filedes == -1) {
+ if (errno == EINVAL) {
+ errno = ENOTSUP;
+ } else {
+ errno = ENOATTR;
+ }
+ }
+ return filedes;
+}
+
+static int solaris_openat(int fildes, const char *path, int oflag, mode_t mode)
+{
+ int filedes = openat(fildes, path, oflag, mode);
+ if (filedes == -1) {
+ if (errno == EINVAL) {
+ errno = ENOTSUP;
+ } else {
+ errno = ENOATTR;
+ }
+ }
+ return filedes;
+}
+
+static int solaris_write_xattr(int attrfd, const char *value, size_t size)
+{
+ if ((ftruncate(attrfd, 0) == 0) && (write(attrfd, value, size) == size)) {
+ return 0;
+ } else {
+ return -1;
+ }
+}
+#endif /*HAVE_ATTROPEN*/
+
+
diff --git a/man/tdbbackup.8.xml b/man/tdbbackup.8.xml
new file mode 100644
index 0000000..740b3d5
--- /dev/null
+++ b/man/tdbbackup.8.xml
@@ -0,0 +1,149 @@
+<?xml version="1.0" encoding="iso-8859-1"?>
+<!DOCTYPE refentry PUBLIC "-//OASIS//DTD DocBook XML V4.2//EN" "http://www.oasis-open.org/docbook/xml/4.2/docbookx.dtd">
+<refentry id="tdbbackup.8">
+<refentryinfo><date>2015-04-25</date></refentryinfo>
+
+<refmeta>
+ <refentrytitle>tdbbackup</refentrytitle>
+ <manvolnum>8</manvolnum>
+ <refmiscinfo class="source">Samba</refmiscinfo>
+ <refmiscinfo class="manual">System Administration tools</refmiscinfo>
+ <refmiscinfo class="version">3.6</refmiscinfo>
+</refmeta>
+
+
+<refnamediv>
+ <refname>tdbbackup</refname>
+ <refpurpose>tool for backing up and for validating the integrity of samba .tdb files</refpurpose>
+</refnamediv>
+
+<refsynopsisdiv>
+ <cmdsynopsis>
+ <command>tdbbackup</command>
+ <arg choice="opt">-s suffix</arg>
+ <arg choice="opt">-v</arg>
+ <arg choice="opt">-h</arg>
+ <arg choice="opt">-l</arg>
+ </cmdsynopsis>
+</refsynopsisdiv>
+
+<refsect1>
+ <title>DESCRIPTION</title>
+
+ <para>This tool is part of the <citerefentry><refentrytitle>samba</refentrytitle>
+ <manvolnum>1</manvolnum></citerefentry> suite.</para>
+
+  <para><command>tdbbackup</command> is a tool that may be used to back up samba .tdb
+  files. This tool may also be used to verify the integrity of the .tdb files prior
+  to samba startup or during normal operation. If it finds file damage and a prior
+  backup exists, the backup file will be restored.
+ </para>
+</refsect1>
+
+
+<refsect1>
+ <title>OPTIONS</title>
+
+ <variablelist>
+
+ <varlistentry>
+ <term>-h</term>
+ <listitem><para>
+ Get help information.
+ </para></listitem>
+ </varlistentry>
+
+ <varlistentry>
+ <term>-s suffix</term>
+ <listitem><para>
+ The <command>-s</command> option allows the administrator to specify a file
+ backup extension. This way it is possible to keep a history of tdb backup
+ files by using a new suffix for each backup.
+ </para> </listitem>
+ </varlistentry>
+
+ <varlistentry>
+ <term>-v</term>
+ <listitem><para>
+	The <command>-v</command> option will check the database for damage (corrupt
+	data); if damage is detected, the backup will be restored.
+ </para></listitem>
+ </varlistentry>
+
+ <varlistentry>
+ <term>-l</term>
+ <listitem><para>
+	This option disables any locking, by passing TDB_NOLOCK
+	to tdb_open_ex(). Only use this for database files which
+	are not used by any other process, and only if it is otherwise not
+	possible to open the database, e.g. for databases which were created
+	with mutex locking.
+ </para></listitem>
+ </varlistentry>
+
+ </variablelist>
+</refsect1>
+
+
+<refsect1>
+ <title>COMMANDS</title>
+
+ <para><emphasis>GENERAL INFORMATION</emphasis></para>
+
+ <para>
+	The <command>tdbbackup</command> utility can safely be run at any time. It was designed
+	to validate the integrity of tdb files even during Samba
+	operation. Typical usage for the command will be:
+ </para>
+
+ <para>tdbbackup [-s suffix] *.tdb</para>
+
+ <para>
+ Before restarting samba the following command may be run to validate .tdb files:
+ </para>
+
+ <para>tdbbackup -v [-s suffix] *.tdb</para>
+
+ <para>
+	Samba .tdb files are stored in various locations; be sure to back up all
+	.tdb files on the system. Important files include:
+ </para>
+
+ <itemizedlist>
+ <listitem><para>
+ <command>secrets.tdb</command> - usual location is in the /usr/local/samba/private
+ directory, or on some systems in /etc/samba.
+ </para></listitem>
+
+ <listitem><para>
+ <command>passdb.tdb</command> - usual location is in the /usr/local/samba/private
+ directory, or on some systems in /etc/samba.
+ </para></listitem>
+
+ <listitem><para>
+ <command>*.tdb</command> located in the /usr/local/samba/var directory or on some
+ systems in the /var/cache or /var/lib/samba directories.
+ </para></listitem>
+ </itemizedlist>
+
+</refsect1>
+
+<refsect1>
+ <title>VERSION</title>
+
+ <para>This man page is correct for version 3 of the Samba suite.</para>
+</refsect1>
+
+<refsect1>
+ <title>AUTHOR</title>
+
+ <para>
+ The original Samba software and related utilities were created by Andrew Tridgell.
+ Samba is now developed by the Samba Team as an Open Source project similar to the way
+ the Linux kernel is developed.
+ </para>
+
+ <para>The tdbbackup man page was written by John H Terpstra.</para>
+</refsect1>
+
+</refentry>
diff --git a/man/tdbdump.8.xml b/man/tdbdump.8.xml
new file mode 100644
index 0000000..31e6888
--- /dev/null
+++ b/man/tdbdump.8.xml
@@ -0,0 +1,93 @@
+<?xml version="1.0" encoding="iso-8859-1"?>
+<!DOCTYPE refentry PUBLIC "-//OASIS//DTD DocBook XML V4.2//EN" "http://www.oasis-open.org/docbook/xml/4.2/docbookx.dtd">
+<refentry id="tdbdump.8">
+<refentryinfo><date>2015-04-25</date></refentryinfo>
+
+<refmeta>
+ <refentrytitle>tdbdump</refentrytitle>
+ <manvolnum>8</manvolnum>
+ <refmiscinfo class="source">Samba</refmiscinfo>
+ <refmiscinfo class="manual">System Administration tools</refmiscinfo>
+ <refmiscinfo class="version">3.6</refmiscinfo>
+</refmeta>
+
+
+<refnamediv>
+ <refname>tdbdump</refname>
+ <refpurpose>tool for printing the contents of a TDB file</refpurpose>
+</refnamediv>
+
+<refsynopsisdiv>
+ <cmdsynopsis>
+ <command>tdbdump</command>
+ <arg choice="opt">-k <replaceable>keyname</replaceable></arg>
+ <arg choice="opt">-e</arg>
+ <arg choice="opt">-h</arg>
+ <arg choice="req">filename</arg>
+ </cmdsynopsis>
+</refsynopsisdiv>
+
+<refsect1>
+ <title>DESCRIPTION</title>
+
+ <para>This tool is part of the <citerefentry><refentrytitle>samba</refentrytitle>
+ <manvolnum>1</manvolnum></citerefentry> suite.</para>
+
+ <para><command>tdbdump</command> is a very simple utility that 'dumps' the
+ contents of a TDB (Trivial DataBase) file to standard output in a
+ human-readable format.
+ </para>
+
+ <para>This tool can be used when debugging problems with TDB files. It is
+ intended for those who are somewhat familiar with Samba internals.
+ </para>
+</refsect1>
+
+<refsect1>
+ <title>OPTIONS</title>
+
+ <variablelist>
+
+ <varlistentry>
+ <term>-h</term>
+ <listitem><para>
+ Get help information.
+ </para></listitem>
+ </varlistentry>
+
+ <varlistentry>
+ <term>-k <replaceable>keyname</replaceable></term>
+ <listitem><para>
+ The <command>-k</command> option restricts dumping to a single key, if found.
+ </para> </listitem>
+ </varlistentry>
+
+ <varlistentry>
+ <term>-e</term>
+ <listitem><para>
+	The <command>-e</command> option tries to dump data out of a corrupt database. Naturally, such a dump is unreliable, at best.
+ </para></listitem>
+ </varlistentry>
+
+ </variablelist>
+</refsect1>
+
+<refsect1>
+ <title>VERSION</title>
+
+ <para>This man page is correct for version 3 of the Samba suite.</para>
+</refsect1>
+
+<refsect1>
+ <title>AUTHOR</title>
+
+ <para>
+ The original Samba software and related utilities were created by Andrew Tridgell.
+ Samba is now developed by the Samba Team as an Open Source project similar to the way
+ the Linux kernel is developed.
+ </para>
+
+ <para>The tdbdump man page was written by Jelmer Vernooij.</para>
+</refsect1>
+
+</refentry>
diff --git a/man/tdbrestore.8.xml b/man/tdbrestore.8.xml
new file mode 100644
index 0000000..034db53
--- /dev/null
+++ b/man/tdbrestore.8.xml
@@ -0,0 +1,67 @@
+<?xml version="1.0" encoding="iso-8859-1"?>
+<!DOCTYPE refentry PUBLIC "-//OASIS//DTD DocBook XML V4.2//EN" "http://www.oasis-open.org/docbook/xml/4.2/docbookx.dtd">
+<refentry id="tdbrestore.8">
+<refentryinfo><date>2015-04-25</date></refentryinfo>
+
+<refmeta>
+ <refentrytitle>tdbrestore</refentrytitle>
+ <manvolnum>8</manvolnum>
+ <refmiscinfo class="source">Samba</refmiscinfo>
+ <refmiscinfo class="manual">System Administration tools</refmiscinfo>
+ <refmiscinfo class="version">3.6</refmiscinfo>
+</refmeta>
+
+
+<refnamediv>
+ <refname>tdbrestore</refname>
+ <refpurpose>tool for creating a TDB file out of a tdbdump output</refpurpose>
+</refnamediv>
+
+<refsynopsisdiv>
+ <cmdsynopsis>
+ <command>tdbrestore</command>
+ <arg choice="req">tdbfilename</arg>
+ </cmdsynopsis>
+</refsynopsisdiv>
+
+<refsect1>
+ <title>DESCRIPTION</title>
+
+ <para>This tool is part of the <citerefentry><refentrytitle>samba</refentrytitle>
+ <manvolnum>1</manvolnum></citerefentry> suite.</para>
+
+  <para><command>tdbrestore</command> is a very simple utility that 'restores' the
+  contents of a dump file into a TDB (Trivial DataBase) file. The dump file is
+  obtained from the tdbdump command.
+ </para>
+
+  <para>This tool waits on standard input for the content of the dump and writes
+  the tdb to the file named by the tdbfilename parameter.
+ </para>
+  <para>This tool can be used to unpack the content of a tdb as a means of backup.
+ </para>
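+  <para>For example, a database can be recreated from a previously produced
+  dump (the file names here are only illustrative):
+  </para>
+  <para>tdbdump old.tdb | tdbrestore new.tdb</para>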
+</refsect1>
+
+
+<refsect1>
+ <title>VERSION</title>
+
+ <para>This man page is correct for version 3 of the Samba suite.</para>
+</refsect1>
+
+<refsect1>
+ <title>AUTHOR</title>
+
+ <para>
+ The original Samba software and related utilities were created by Andrew Tridgell.
+ Samba is now developed by the Samba Team as an Open Source project similar to the way
+ the Linux kernel is developed.
+
+ This tool was initially written by Volker Lendecke based on an
+ idea by Simon McVittie.
+ </para>
+
+ <para>The tdbrestore man page was written by Matthieu Patou.</para>
+</refsect1>
+
+</refentry>
diff --git a/man/tdbtool.8.xml b/man/tdbtool.8.xml
new file mode 100644
index 0000000..9a9b95e
--- /dev/null
+++ b/man/tdbtool.8.xml
@@ -0,0 +1,265 @@
+<?xml version="1.0" encoding="iso-8859-1"?>
+<!DOCTYPE refentry PUBLIC "-//OASIS//DTD DocBook XML V4.2//EN" "http://www.oasis-open.org/docbook/xml/4.2/docbookx.dtd">
+<refentry id="tdbtool.8">
+<refentryinfo><date>2015-04-25</date></refentryinfo>
+
+<refmeta>
+ <refentrytitle>tdbtool</refentrytitle>
+ <manvolnum>8</manvolnum>
+ <refmiscinfo class="source">Samba</refmiscinfo>
+ <refmiscinfo class="manual">System Administration tools</refmiscinfo>
+ <refmiscinfo class="version">4.0</refmiscinfo>
+</refmeta>
+
+
+<refnamediv>
+ <refname>tdbtool</refname>
+  <refpurpose>manipulate the contents of TDB files</refpurpose>
+</refnamediv>
+
+<refsynopsisdiv>
+
+ <cmdsynopsis>
+ <command>tdbtool</command>
+ </cmdsynopsis>
+
+ <cmdsynopsis>
+ <command>tdbtool</command>
+ <arg choice="opt">-l</arg>
+ <arg choice="plain">
+ <replaceable>TDBFILE</replaceable>
+ </arg>
+ <arg rep="repeat" choice="opt">
+ <replaceable>COMMANDS</replaceable>
+ </arg>
+ </cmdsynopsis>
+
+</refsynopsisdiv>
+
+<refsect1>
+ <title>DESCRIPTION</title>
+
+ <para>This tool is part of the
+ <citerefentry><refentrytitle>samba</refentrytitle>
+ <manvolnum>1</manvolnum></citerefentry> suite.</para>
+
+  <para><command>tdbtool</command> is a tool for displaying and
+ altering the contents of Samba TDB (Trivial DataBase) files. Each
+ of the commands listed below can be entered interactively or
+ provided on the command line.</para>
+
+</refsect1>
+
+<refsect1>
+ <title>OPTIONS</title>
+
+ <variablelist>
+
+ <varlistentry>
+ <term>-l</term>
+ <listitem><para>
+	This option disables any locking, by passing TDB_NOLOCK
+	to tdb_open_ex(). Only use this for database files which
+	are not used by any other process, and only if it is otherwise not
+	possible to open the database, e.g. for databases which were created
+	with mutex locking.
+ </para></listitem>
+ </varlistentry>
+
+ </variablelist>
+</refsect1>
+
+
+
+<refsect1>
+ <title>COMMANDS</title>
+
+ <variablelist>
+
+ <varlistentry>
+ <term><option>create</option>
+ <replaceable>TDBFILE</replaceable></term>
+ <listitem><para>Create a new database named
+ <replaceable>TDBFILE</replaceable>.
+ </para></listitem>
+ </varlistentry>
+
+ <varlistentry>
+ <term><option>open</option>
+ <replaceable>TDBFILE</replaceable></term>
+ <listitem><para>Open an existing database named
+ <replaceable>TDBFILE</replaceable>.
+ </para></listitem>
+ </varlistentry>
+
+ <varlistentry>
+ <term><option>erase</option></term>
+ <listitem><para>Erase the current database.
+ </para></listitem>
+ </varlistentry>
+
+ <varlistentry>
+ <term><option>dump</option></term>
+ <listitem><para>Dump the current database as strings.
+ </para></listitem>
+ </varlistentry>
+
+ <varlistentry>
+ <term><option>cdump</option></term>
+ <listitem><para>Dump the current database as connection records.
+ </para></listitem>
+ </varlistentry>
+
+ <varlistentry>
+ <term><option>keys</option></term>
+ <listitem><para>Dump the current database keys as strings.
+ </para></listitem>
+ </varlistentry>
+
+ <varlistentry>
+ <term><option>hexkeys</option></term>
+ <listitem><para>Dump the current database keys as hex values.
+ </para></listitem>
+ </varlistentry>
+
+ <varlistentry>
+ <term><option>info</option></term>
+ <listitem><para>Print summary information about the
+ current database.
+ </para></listitem>
+ </varlistentry>
+
+ <varlistentry>
+ <term><option>insert</option>
+ <replaceable>KEY</replaceable>
+ <replaceable>DATA</replaceable>
+ </term>
+ <listitem><para>Insert a record into the
+ current database.
+ </para></listitem>
+ </varlistentry>
+
+ <varlistentry>
+ <term><option>move</option>
+ <replaceable>KEY</replaceable>
+ <replaceable>TDBFILE</replaceable>
+ </term>
+ <listitem><para>Move a record from the
+ current database into <replaceable>TDBFILE</replaceable>.
+ </para></listitem>
+ </varlistentry>
+
+ <varlistentry>
+ <term><option>store</option>
+ <replaceable>KEY</replaceable>
+ <replaceable>DATA</replaceable>
+ </term>
+ <listitem><para>Store (replace) a record in the
+ current database.
+ </para></listitem>
+ </varlistentry>
+
+ <varlistentry>
+ <term><option>show</option>
+ <replaceable>KEY</replaceable>
+ </term>
+ <listitem><para>Show a record by key.
+ </para></listitem>
+ </varlistentry>
+
+ <varlistentry>
+ <term><option>delete</option>
+ <replaceable>KEY</replaceable>
+ </term>
+ <listitem><para>Delete a record by key.
+ </para></listitem>
+ </varlistentry>
+
+ <varlistentry>
+ <term><option>list</option>
+ </term>
+ <listitem><para>Print the current database hash table and free list.
+ </para></listitem>
+ </varlistentry>
+
+ <varlistentry>
+ <term><option>free</option>
+ </term>
+	<listitem><para>Print the current database freelist.
+ </para></listitem>
+ </varlistentry>
+
+ <varlistentry>
+ <term><option>!</option>
+ <replaceable>COMMAND</replaceable>
+ </term>
+ <listitem><para>Execute the given system command.
+ </para></listitem>
+ </varlistentry>
+
+ <varlistentry>
+ <term>
+ <option>first</option>
+ </term>
+ <listitem><para>Print the first record in the current database.
+ </para></listitem>
+ </varlistentry>
+
+ <varlistentry>
+ <term>
+ <option>next</option>
+ </term>
+ <listitem><para>Print the next record in the current database.
+ </para></listitem>
+ </varlistentry>
+
+ <varlistentry>
+ <term>
+ <option>check</option>
+ </term>
+ <listitem><para>Check the integrity of the current database.
+ </para></listitem>
+ </varlistentry>
+
+ <varlistentry>
+ <term>
+ <option>repack</option>
+ </term>
+ <listitem><para>Repack a database using a temporary file to remove fragmentation.
+ </para></listitem>
+ </varlistentry>
+
+ <varlistentry>
+ <term>
+ <option>quit</option>
+ </term>
+ <listitem><para>Exit <command>tdbtool</command>.
+ </para></listitem>
+ </varlistentry>
+
+ </variablelist>
+</refsect1>
+
+<refsect1>
+ <title>CAVEATS</title>
+ <para>The contents of the Samba TDB files are private
+ to the implementation and should not be altered with
+ <command>tdbtool</command>.
+ </para>
+</refsect1>
+
+<refsect1>
+ <title>VERSION</title>
+ <para>This man page is correct for version 3.6 of the Samba suite.</para>
+</refsect1>
+
+<refsect1>
+ <title>AUTHOR</title>
+
+ <para> The original Samba software and related utilities were
+ created by Andrew Tridgell. Samba is now developed by the
+ Samba Team as an Open Source project similar to the way the
+ Linux kernel is developed.</para>
+</refsect1>
+
+</refentry>
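
As a usage note, the -l option above corresponds to the TDB_NOLOCK flag,
which the Python bindings added later in this patch also expose. A minimal
sketch (hypothetical filename; only safe while no other process has the
database open):

    import os
    import tdb

    # Open with all locking disabled, mirroring "tdbtool -l".
    db = tdb.Tdb("example.tdb", 0, tdb.NOLOCK, os.O_RDWR)
    for key in db:
        print "%r -> %r" % (key, db[key])
    db.close()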
diff --git a/pytdb.c b/pytdb.c
new file mode 100644
index 0000000..9320799
--- /dev/null
+++ b/pytdb.c
@@ -0,0 +1,707 @@
+/*
+ Unix SMB/CIFS implementation.
+
+ Python interface to tdb.
+
+ Copyright (C) 2004-2006 Tim Potter <tpot@samba.org>
+ Copyright (C) 2007-2008 Jelmer Vernooij <jelmer@samba.org>
+
+ ** NOTE! The following LGPL license applies to the tdb
+ ** library. This does NOT imply that all of Samba is released
+ ** under the LGPL
+
+ This library is free software; you can redistribute it and/or
+ modify it under the terms of the GNU Lesser General Public
+ License as published by the Free Software Foundation; either
+ version 3 of the License, or (at your option) any later version.
+
+ This library is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ Lesser General Public License for more details.
+
+ You should have received a copy of the GNU Lesser General Public
+ License along with this library; if not, see <http://www.gnu.org/licenses/>.
+*/
+
+#include <Python.h>
+#include "replace.h"
+#include "system/filesys.h"
+
+/* Include tdb headers */
+#include <tdb.h>
+
+typedef struct {
+ PyObject_HEAD
+ TDB_CONTEXT *ctx;
+ bool closed;
+} PyTdbObject;
+
+staticforward PyTypeObject PyTdb;
+
+static void PyErr_SetTDBError(TDB_CONTEXT *tdb)
+{
+ PyErr_SetObject(PyExc_RuntimeError,
+ Py_BuildValue("(i,s)", tdb_error(tdb), tdb_errorstr(tdb)));
+}
+
+static TDB_DATA PyString_AsTDB_DATA(PyObject *data)
+{
+ TDB_DATA ret;
+ ret.dptr = (unsigned char *)PyString_AsString(data);
+ ret.dsize = PyString_Size(data);
+ return ret;
+}
+
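+/*
+ * Convert a TDB_DATA into a Python string. Note that this takes
+ * ownership of data.dptr and frees it, so it must only be passed
+ * malloc'ed data such as the results of tdb_fetch()/tdb_firstkey().
+ */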
+static PyObject *PyString_FromTDB_DATA(TDB_DATA data)
+{
+ if (data.dptr == NULL && data.dsize == 0) {
+ Py_RETURN_NONE;
+ } else {
+ PyObject *ret = PyString_FromStringAndSize((const char *)data.dptr,
+ data.dsize);
+ free(data.dptr);
+ return ret;
+ }
+}
+
+#define PyErr_TDB_ERROR_IS_ERR_RAISE(ret, tdb) \
+ if (ret != 0) { \
+ PyErr_SetTDBError(tdb); \
+ return NULL; \
+ }
+
+#define PyErr_TDB_RAISE_IF_CLOSED(self) \
+ if (self->closed) { \
+ PyErr_SetObject(PyExc_RuntimeError, \
+ Py_BuildValue("(i,s)", TDB_ERR_IO, "Database is already closed")); \
+ return NULL; \
+ }
+
+#define PyErr_TDB_RAISE_RETURN_MINUS_1_IF_CLOSED(self) \
+ if (self->closed) { \
+ PyErr_SetObject(PyExc_RuntimeError, \
+ Py_BuildValue("(i,s)", TDB_ERR_IO, "Database is already closed")); \
+ return -1; \
+ }
+
+static PyObject *py_tdb_open(PyTypeObject *type, PyObject *args, PyObject *kwargs)
+{
+ char *name = NULL;
+ int hash_size = 0, tdb_flags = TDB_DEFAULT, flags = O_RDWR, mode = 0600;
+ TDB_CONTEXT *ctx;
+ PyTdbObject *ret;
+ const char *_kwnames[] = { "name", "hash_size", "tdb_flags", "flags", "mode", NULL };
+ char **kwnames = discard_const_p(char *, _kwnames);
+
+ if (!PyArg_ParseTupleAndKeywords(args, kwargs, "|siiii", kwnames, &name, &hash_size, &tdb_flags, &flags, &mode))
+ return NULL;
+
+ if (name == NULL) {
+ tdb_flags |= TDB_INTERNAL;
+ }
+
+ ctx = tdb_open(name, hash_size, tdb_flags, flags, mode);
+ if (ctx == NULL) {
+ PyErr_SetFromErrno(PyExc_IOError);
+ return NULL;
+ }
+
+ ret = PyObject_New(PyTdbObject, &PyTdb);
+ if (!ret) {
+ tdb_close(ctx);
+ return NULL;
+ }
+
+ ret->ctx = ctx;
+ ret->closed = false;
+ return (PyObject *)ret;
+}
+
+static PyObject *obj_transaction_cancel(PyTdbObject *self)
+{
+ int ret;
+
+ PyErr_TDB_RAISE_IF_CLOSED(self);
+
+ ret = tdb_transaction_cancel(self->ctx);
+ PyErr_TDB_ERROR_IS_ERR_RAISE(ret, self->ctx);
+ Py_RETURN_NONE;
+}
+
+static PyObject *obj_transaction_commit(PyTdbObject *self)
+{
+ int ret;
+ PyErr_TDB_RAISE_IF_CLOSED(self);
+ ret = tdb_transaction_commit(self->ctx);
+ PyErr_TDB_ERROR_IS_ERR_RAISE(ret, self->ctx);
+ Py_RETURN_NONE;
+}
+
+static PyObject *obj_transaction_prepare_commit(PyTdbObject *self)
+{
+ int ret;
+ PyErr_TDB_RAISE_IF_CLOSED(self);
+ ret = tdb_transaction_prepare_commit(self->ctx);
+ PyErr_TDB_ERROR_IS_ERR_RAISE(ret, self->ctx);
+ Py_RETURN_NONE;
+}
+
+static PyObject *obj_transaction_start(PyTdbObject *self)
+{
+ int ret;
+ PyErr_TDB_RAISE_IF_CLOSED(self);
+ ret = tdb_transaction_start(self->ctx);
+ PyErr_TDB_ERROR_IS_ERR_RAISE(ret, self->ctx);
+ Py_RETURN_NONE;
+}
+
+static PyObject *obj_reopen(PyTdbObject *self)
+{
+ int ret;
+ PyErr_TDB_RAISE_IF_CLOSED(self);
+ ret = tdb_reopen(self->ctx);
+ if (ret != 0) {
+ self->closed = true;
+ PyErr_SetObject(PyExc_RuntimeError,
+ Py_BuildValue("(i,s)",
+ TDB_ERR_IO,
+ "Failed to reopen database"));
+ return NULL;
+ }
+ Py_RETURN_NONE;
+}
+
+static PyObject *obj_lockall(PyTdbObject *self)
+{
+ int ret;
+ PyErr_TDB_RAISE_IF_CLOSED(self);
+ ret = tdb_lockall(self->ctx);
+ PyErr_TDB_ERROR_IS_ERR_RAISE(ret, self->ctx);
+ Py_RETURN_NONE;
+}
+
+static PyObject *obj_unlockall(PyTdbObject *self)
+{
+ int ret;
+ PyErr_TDB_RAISE_IF_CLOSED(self);
+ ret = tdb_unlockall(self->ctx);
+ PyErr_TDB_ERROR_IS_ERR_RAISE(ret, self->ctx);
+ Py_RETURN_NONE;
+}
+
+static PyObject *obj_lockall_read(PyTdbObject *self)
+{
+ int ret;
+ PyErr_TDB_RAISE_IF_CLOSED(self);
+ ret = tdb_lockall_read(self->ctx);
+ PyErr_TDB_ERROR_IS_ERR_RAISE(ret, self->ctx);
+ Py_RETURN_NONE;
+}
+
+static PyObject *obj_unlockall_read(PyTdbObject *self)
+{
+	int ret;
+	PyErr_TDB_RAISE_IF_CLOSED(self);
+	ret = tdb_unlockall_read(self->ctx);
+	PyErr_TDB_ERROR_IS_ERR_RAISE(ret, self->ctx);
+	Py_RETURN_NONE;
+}
+
+static PyObject *obj_close(PyTdbObject *self)
+{
+ int ret;
+ if (self->closed)
+ Py_RETURN_NONE;
+ ret = tdb_close(self->ctx);
+ self->closed = true;
+ if (ret != 0) {
+ PyErr_SetObject(PyExc_RuntimeError,
+ Py_BuildValue("(i,s)",
+ TDB_ERR_IO,
+ "Failed to close database"));
+ return NULL;
+ }
+ Py_RETURN_NONE;
+}
+
+static PyObject *obj_get(PyTdbObject *self, PyObject *args)
+{
+ TDB_DATA key;
+ PyObject *py_key;
+
+ PyErr_TDB_RAISE_IF_CLOSED(self);
+
+ if (!PyArg_ParseTuple(args, "O", &py_key))
+ return NULL;
+
+ key = PyString_AsTDB_DATA(py_key);
+ if (!key.dptr)
+ return NULL;
+
+ return PyString_FromTDB_DATA(tdb_fetch(self->ctx, key));
+}
+
+static PyObject *obj_append(PyTdbObject *self, PyObject *args)
+{
+ TDB_DATA key, data;
+ PyObject *py_key, *py_data;
+ int ret;
+
+ PyErr_TDB_RAISE_IF_CLOSED(self);
+
+ if (!PyArg_ParseTuple(args, "OO", &py_key, &py_data))
+ return NULL;
+
+ key = PyString_AsTDB_DATA(py_key);
+ if (!key.dptr)
+ return NULL;
+ data = PyString_AsTDB_DATA(py_data);
+ if (!data.dptr)
+ return NULL;
+
+ ret = tdb_append(self->ctx, key, data);
+ PyErr_TDB_ERROR_IS_ERR_RAISE(ret, self->ctx);
+ Py_RETURN_NONE;
+}
+
+static PyObject *obj_firstkey(PyTdbObject *self)
+{
+ PyErr_TDB_RAISE_IF_CLOSED(self);
+
+ return PyString_FromTDB_DATA(tdb_firstkey(self->ctx));
+}
+
+static PyObject *obj_nextkey(PyTdbObject *self, PyObject *args)
+{
+ TDB_DATA key;
+ PyObject *py_key;
+ PyErr_TDB_RAISE_IF_CLOSED(self);
+
+ if (!PyArg_ParseTuple(args, "O", &py_key))
+ return NULL;
+
+ key = PyString_AsTDB_DATA(py_key);
+ if (!key.dptr)
+ return NULL;
+
+ return PyString_FromTDB_DATA(tdb_nextkey(self->ctx, key));
+}
+
+static PyObject *obj_delete(PyTdbObject *self, PyObject *args)
+{
+ TDB_DATA key;
+ PyObject *py_key;
+ int ret;
+ PyErr_TDB_RAISE_IF_CLOSED(self);
+
+ if (!PyArg_ParseTuple(args, "O", &py_key))
+ return NULL;
+
+ key = PyString_AsTDB_DATA(py_key);
+ if (!key.dptr)
+ return NULL;
+ ret = tdb_delete(self->ctx, key);
+ PyErr_TDB_ERROR_IS_ERR_RAISE(ret, self->ctx);
+ Py_RETURN_NONE;
+}
+
+static PyObject *obj_has_key(PyTdbObject *self, PyObject *args)
+{
+ TDB_DATA key;
+ int ret;
+ PyObject *py_key;
+ PyErr_TDB_RAISE_IF_CLOSED(self);
+
+ if (!PyArg_ParseTuple(args, "O", &py_key))
+ return NULL;
+
+ key = PyString_AsTDB_DATA(py_key);
+ if (!key.dptr)
+ return NULL;
+	/* tdb_exists() returns 1 if the key is present, 0 otherwise. */
+	ret = tdb_exists(self->ctx, key);
+	if (ret)
+		Py_RETURN_TRUE;
+	Py_RETURN_FALSE;
+}
+
+static PyObject *obj_store(PyTdbObject *self, PyObject *args)
+{
+ TDB_DATA key, value;
+ int ret;
+ int flag = TDB_REPLACE;
+ PyObject *py_key, *py_value;
+
+ PyErr_TDB_RAISE_IF_CLOSED(self);
+
+ if (!PyArg_ParseTuple(args, "OO|i", &py_key, &py_value, &flag))
+ return NULL;
+
+ key = PyString_AsTDB_DATA(py_key);
+ if (!key.dptr)
+ return NULL;
+ value = PyString_AsTDB_DATA(py_value);
+ if (!value.dptr)
+ return NULL;
+
+ ret = tdb_store(self->ctx, key, value, flag);
+ PyErr_TDB_ERROR_IS_ERR_RAISE(ret, self->ctx);
+ Py_RETURN_NONE;
+}
+
+static PyObject *obj_add_flags(PyTdbObject *self, PyObject *args)
+{
+ unsigned flags;
+
+ PyErr_TDB_RAISE_IF_CLOSED(self);
+
+ if (!PyArg_ParseTuple(args, "I", &flags))
+ return NULL;
+
+ tdb_add_flags(self->ctx, flags);
+ Py_RETURN_NONE;
+}
+
+static PyObject *obj_remove_flags(PyTdbObject *self, PyObject *args)
+{
+ unsigned flags;
+
+ PyErr_TDB_RAISE_IF_CLOSED(self);
+
+ if (!PyArg_ParseTuple(args, "I", &flags))
+ return NULL;
+
+ tdb_remove_flags(self->ctx, flags);
+ Py_RETURN_NONE;
+}
+
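+/*
+ * Key iterator state. The iterator holds a reference to the Tdb object
+ * (see tdb_object_iter below), so the database cannot be deallocated
+ * while an iteration is in progress.
+ */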
+typedef struct {
+ PyObject_HEAD
+ TDB_DATA current;
+ PyTdbObject *iteratee;
+} PyTdbIteratorObject;
+
+static PyObject *tdb_iter_next(PyTdbIteratorObject *self)
+{
+ TDB_DATA current;
+ PyObject *ret;
+ if (self->current.dptr == NULL && self->current.dsize == 0)
+ return NULL;
+ current = self->current;
+ self->current = tdb_nextkey(self->iteratee->ctx, self->current);
+ ret = PyString_FromTDB_DATA(current);
+ return ret;
+}
+
+static void tdb_iter_dealloc(PyTdbIteratorObject *self)
+{
+ Py_DECREF(self->iteratee);
+ PyObject_Del(self);
+}
+
+PyTypeObject PyTdbIterator = {
+ .tp_name = "Iterator",
+ .tp_basicsize = sizeof(PyTdbIteratorObject),
+ .tp_iternext = (iternextfunc)tdb_iter_next,
+ .tp_dealloc = (destructor)tdb_iter_dealloc,
+ .tp_flags = Py_TPFLAGS_DEFAULT,
+ .tp_iter = PyObject_SelfIter,
+};
+
+static PyObject *tdb_object_iter(PyTdbObject *self)
+{
+ PyTdbIteratorObject *ret;
+
+ PyErr_TDB_RAISE_IF_CLOSED(self);
+
+ ret = PyObject_New(PyTdbIteratorObject, &PyTdbIterator);
+ if (!ret)
+ return NULL;
+ ret->current = tdb_firstkey(self->ctx);
+ ret->iteratee = self;
+ Py_INCREF(self);
+ return (PyObject *)ret;
+}
+
+static PyObject *obj_clear(PyTdbObject *self)
+{
+ int ret;
+ PyErr_TDB_RAISE_IF_CLOSED(self);
+ ret = tdb_wipe_all(self->ctx);
+ PyErr_TDB_ERROR_IS_ERR_RAISE(ret, self->ctx);
+ Py_RETURN_NONE;
+}
+
+static PyObject *obj_repack(PyTdbObject *self)
+{
+ int ret;
+ PyErr_TDB_RAISE_IF_CLOSED(self);
+ ret = tdb_repack(self->ctx);
+ PyErr_TDB_ERROR_IS_ERR_RAISE(ret, self->ctx);
+ Py_RETURN_NONE;
+}
+
+static PyObject *obj_enable_seqnum(PyTdbObject *self)
+{
+ PyErr_TDB_RAISE_IF_CLOSED(self);
+ tdb_enable_seqnum(self->ctx);
+ Py_RETURN_NONE;
+}
+
+static PyObject *obj_increment_seqnum_nonblock(PyTdbObject *self)
+{
+ PyErr_TDB_RAISE_IF_CLOSED(self);
+ tdb_increment_seqnum_nonblock(self->ctx);
+ Py_RETURN_NONE;
+}
+
+static PyMethodDef tdb_object_methods[] = {
+ { "transaction_cancel", (PyCFunction)obj_transaction_cancel, METH_NOARGS,
+ "S.transaction_cancel() -> None\n"
+ "Cancel the currently active transaction." },
+ { "transaction_commit", (PyCFunction)obj_transaction_commit, METH_NOARGS,
+ "S.transaction_commit() -> None\n"
+ "Commit the currently active transaction." },
+ { "transaction_prepare_commit", (PyCFunction)obj_transaction_prepare_commit, METH_NOARGS,
+ "S.transaction_prepare_commit() -> None\n"
+ "Prepare to commit the currently active transaction" },
+ { "transaction_start", (PyCFunction)obj_transaction_start, METH_NOARGS,
+ "S.transaction_start() -> None\n"
+ "Start a new transaction." },
+ { "reopen", (PyCFunction)obj_reopen, METH_NOARGS, "Reopen this file." },
+ { "lock_all", (PyCFunction)obj_lockall, METH_NOARGS, NULL },
+ { "unlock_all", (PyCFunction)obj_unlockall, METH_NOARGS, NULL },
+ { "read_lock_all", (PyCFunction)obj_lockall_read, METH_NOARGS, NULL },
+ { "read_unlock_all", (PyCFunction)obj_unlockall_read, METH_NOARGS, NULL },
+ { "close", (PyCFunction)obj_close, METH_NOARGS, NULL },
+ { "get", (PyCFunction)obj_get, METH_VARARGS, "S.get(key) -> value\n"
+ "Fetch a value." },
+ { "append", (PyCFunction)obj_append, METH_VARARGS, "S.append(key, value) -> None\n"
+ "Append data to an existing key." },
+ { "firstkey", (PyCFunction)obj_firstkey, METH_NOARGS, "S.firstkey() -> data\n"
+ "Return the first key in this database." },
+	{ "nextkey", (PyCFunction)obj_nextkey, METH_VARARGS, "S.nextkey(key) -> data\n"
+ "Return the next key in this database." },
+ { "delete", (PyCFunction)obj_delete, METH_VARARGS, "S.delete(key) -> None\n"
+ "Delete an entry." },
+ { "has_key", (PyCFunction)obj_has_key, METH_VARARGS, "S.has_key(key) -> None\n"
+ "Check whether key exists in this database." },
+	{ "store", (PyCFunction)obj_store, METH_VARARGS, "S.store(key, data, flag=REPLACE) -> None\n"
+ "Store data." },
+ { "add_flags", (PyCFunction)obj_add_flags, METH_VARARGS, "S.add_flags(flags) -> None" },
+ { "remove_flags", (PyCFunction)obj_remove_flags, METH_VARARGS, "S.remove_flags(flags) -> None" },
+ { "iterkeys", (PyCFunction)tdb_object_iter, METH_NOARGS, "S.iterkeys() -> iterator" },
+ { "clear", (PyCFunction)obj_clear, METH_NOARGS, "S.clear() -> None\n"
+ "Wipe the entire database." },
+ { "repack", (PyCFunction)obj_repack, METH_NOARGS, "S.repack() -> None\n"
+ "Repack the entire database." },
+ { "enable_seqnum", (PyCFunction)obj_enable_seqnum, METH_NOARGS,
+ "S.enable_seqnum() -> None" },
+ { "increment_seqnum_nonblock", (PyCFunction)obj_increment_seqnum_nonblock, METH_NOARGS,
+ "S.increment_seqnum_nonblock() -> None" },
+ { NULL }
+};
+
+static PyObject *obj_get_hash_size(PyTdbObject *self, void *closure)
+{
+ PyErr_TDB_RAISE_IF_CLOSED(self);
+ return PyInt_FromLong(tdb_hash_size(self->ctx));
+}
+
+static int obj_set_max_dead(PyTdbObject *self, PyObject *max_dead, void *closure)
+{
+ PyErr_TDB_RAISE_RETURN_MINUS_1_IF_CLOSED(self);
+ if (!PyInt_Check(max_dead))
+ return -1;
+ tdb_set_max_dead(self->ctx, PyInt_AsLong(max_dead));
+ return 0;
+}
+
+static PyObject *obj_get_map_size(PyTdbObject *self, void *closure)
+{
+ PyErr_TDB_RAISE_IF_CLOSED(self);
+ return PyInt_FromLong(tdb_map_size(self->ctx));
+}
+
+static PyObject *obj_get_freelist_size(PyTdbObject *self, void *closure)
+{
+ PyErr_TDB_RAISE_IF_CLOSED(self);
+ return PyInt_FromLong(tdb_freelist_size(self->ctx));
+}
+
+static PyObject *obj_get_flags(PyTdbObject *self, void *closure)
+{
+ PyErr_TDB_RAISE_IF_CLOSED(self);
+ return PyInt_FromLong(tdb_get_flags(self->ctx));
+}
+
+static PyObject *obj_get_filename(PyTdbObject *self, void *closure)
+{
+ PyErr_TDB_RAISE_IF_CLOSED(self);
+ return PyString_FromString(tdb_name(self->ctx));
+}
+
+static PyObject *obj_get_seqnum(PyTdbObject *self, void *closure)
+{
+ PyErr_TDB_RAISE_IF_CLOSED(self);
+ return PyInt_FromLong(tdb_get_seqnum(self->ctx));
+}
+
+
+static PyGetSetDef tdb_object_getsetters[] = {
+ { discard_const_p(char, "hash_size"),
+ (getter)obj_get_hash_size, NULL, NULL },
+ { discard_const_p(char, "map_size"),
+ (getter)obj_get_map_size, NULL, NULL },
+ { discard_const_p(char, "freelist_size"),
+ (getter)obj_get_freelist_size, NULL, NULL },
+ { discard_const_p(char, "flags"),
+ (getter)obj_get_flags, NULL, NULL },
+ { discard_const_p(char, "max_dead"),
+ NULL, (setter)obj_set_max_dead, NULL },
+ { discard_const_p(char, "filename"),
+ (getter)obj_get_filename, NULL,
+ discard_const_p(char, "The filename of this TDB file.") },
+ { discard_const_p(char, "seqnum"),
+ (getter)obj_get_seqnum, NULL, NULL },
+ { NULL }
+};
+
+static PyObject *tdb_object_repr(PyTdbObject *self)
+{
+ PyErr_TDB_RAISE_IF_CLOSED(self);
+ if (tdb_get_flags(self->ctx) & TDB_INTERNAL) {
+ return PyString_FromString("Tdb(<internal>)");
+ } else {
+ return PyString_FromFormat("Tdb('%s')", tdb_name(self->ctx));
+ }
+}
+
+static void tdb_object_dealloc(PyTdbObject *self)
+{
+ if (!self->closed)
+ tdb_close(self->ctx);
+ self->ob_type->tp_free(self);
+}
+
+static PyObject *obj_getitem(PyTdbObject *self, PyObject *key)
+{
+ TDB_DATA tkey, val;
+ PyErr_TDB_RAISE_IF_CLOSED(self);
+ if (!PyString_Check(key)) {
+ PyErr_SetString(PyExc_TypeError, "Expected string as key");
+ return NULL;
+ }
+
+ tkey.dptr = (unsigned char *)PyString_AsString(key);
+ tkey.dsize = PyString_Size(key);
+
+ val = tdb_fetch(self->ctx, tkey);
+ if (val.dptr == NULL) {
+ PyErr_SetString(PyExc_KeyError, "No such TDB entry");
+ return NULL;
+ } else {
+ return PyString_FromTDB_DATA(val);
+ }
+}
+
+static int obj_setitem(PyTdbObject *self, PyObject *key, PyObject *value)
+{
+ TDB_DATA tkey, tval;
+ int ret;
+ PyErr_TDB_RAISE_RETURN_MINUS_1_IF_CLOSED(self);
+ if (!PyString_Check(key)) {
+ PyErr_SetString(PyExc_TypeError, "Expected string as key");
+ return -1;
+ }
+
+ tkey = PyString_AsTDB_DATA(key);
+
+ if (value == NULL) {
+ ret = tdb_delete(self->ctx, tkey);
+ } else {
+ if (!PyString_Check(value)) {
+ PyErr_SetString(PyExc_TypeError, "Expected string as value");
+ return -1;
+ }
+
+ tval = PyString_AsTDB_DATA(value);
+
+ ret = tdb_store(self->ctx, tkey, tval, TDB_REPLACE);
+ }
+
+ if (ret != 0) {
+ PyErr_SetTDBError(self->ctx);
+ return -1;
+ }
+
+ return ret;
+}
+
+static PyMappingMethods tdb_object_mapping = {
+ .mp_subscript = (binaryfunc)obj_getitem,
+ .mp_ass_subscript = (objobjargproc)obj_setitem,
+};
+static PyTypeObject PyTdb = {
+ .tp_name = "tdb.Tdb",
+ .tp_basicsize = sizeof(PyTdbObject),
+ .tp_methods = tdb_object_methods,
+ .tp_getset = tdb_object_getsetters,
+ .tp_new = py_tdb_open,
+ .tp_doc = "A TDB file",
+ .tp_repr = (reprfunc)tdb_object_repr,
+ .tp_dealloc = (destructor)tdb_object_dealloc,
+ .tp_as_mapping = &tdb_object_mapping,
+ .tp_flags = Py_TPFLAGS_DEFAULT|Py_TPFLAGS_BASETYPE|Py_TPFLAGS_HAVE_ITER,
+ .tp_iter = (getiterfunc)tdb_object_iter,
+};
+
+static PyMethodDef tdb_methods[] = {
+ { "open", (PyCFunction)py_tdb_open, METH_VARARGS|METH_KEYWORDS, "open(name, hash_size=0, tdb_flags=TDB_DEFAULT, flags=O_RDWR, mode=0600)\n"
+ "Open a TDB file." },
+ { NULL }
+};
+
+void inittdb(void);
+void inittdb(void)
+{
+ PyObject *m;
+
+ if (PyType_Ready(&PyTdb) < 0)
+ return;
+
+ if (PyType_Ready(&PyTdbIterator) < 0)
+ return;
+
+ m = Py_InitModule3("tdb", tdb_methods,
+ "simple key-value database that supports multiple writers.");
+ if (m == NULL)
+ return;
+
+ PyModule_AddObject(m, "REPLACE", PyInt_FromLong(TDB_REPLACE));
+ PyModule_AddObject(m, "INSERT", PyInt_FromLong(TDB_INSERT));
+ PyModule_AddObject(m, "MODIFY", PyInt_FromLong(TDB_MODIFY));
+
+ PyModule_AddObject(m, "DEFAULT", PyInt_FromLong(TDB_DEFAULT));
+ PyModule_AddObject(m, "CLEAR_IF_FIRST", PyInt_FromLong(TDB_CLEAR_IF_FIRST));
+ PyModule_AddObject(m, "INTERNAL", PyInt_FromLong(TDB_INTERNAL));
+ PyModule_AddObject(m, "NOLOCK", PyInt_FromLong(TDB_NOLOCK));
+ PyModule_AddObject(m, "NOMMAP", PyInt_FromLong(TDB_NOMMAP));
+ PyModule_AddObject(m, "CONVERT", PyInt_FromLong(TDB_CONVERT));
+ PyModule_AddObject(m, "BIGENDIAN", PyInt_FromLong(TDB_BIGENDIAN));
+ PyModule_AddObject(m, "NOSYNC", PyInt_FromLong(TDB_NOSYNC));
+ PyModule_AddObject(m, "SEQNUM", PyInt_FromLong(TDB_SEQNUM));
+ PyModule_AddObject(m, "VOLATILE", PyInt_FromLong(TDB_VOLATILE));
+ PyModule_AddObject(m, "ALLOW_NESTING", PyInt_FromLong(TDB_ALLOW_NESTING));
+ PyModule_AddObject(m, "DISALLOW_NESTING", PyInt_FromLong(TDB_DISALLOW_NESTING));
+ PyModule_AddObject(m, "INCOMPATIBLE_HASH", PyInt_FromLong(TDB_INCOMPATIBLE_HASH));
+
+ PyModule_AddObject(m, "__docformat__", PyString_FromString("restructuredText"));
+
+ PyModule_AddObject(m, "__version__", PyString_FromString(PACKAGE_VERSION));
+
+ Py_INCREF(&PyTdb);
+ PyModule_AddObject(m, "Tdb", (PyObject *)&PyTdb);
+
+ Py_INCREF(&PyTdbIterator);
+}
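
The bindings above give Python code dict-style access to a TDB file. A
minimal usage sketch (the filename is hypothetical; assumes the compiled
tdb module is importable):

    import os
    import tdb

    db = tdb.Tdb("demo.tdb", 0, tdb.DEFAULT, os.O_CREAT | os.O_RDWR)

    # Mapping protocol (obj_setitem/obj_getitem above).
    db["key"] = "value"
    print db["key"]

    # Group writes into a transaction.
    db.transaction_start()
    db["other"] = "data"
    db.transaction_commit()

    # Iterating a Tdb yields its keys (tdb_object_iter above).
    for key in db:
        print key, "->", db.get(key)

    db.close()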
diff --git a/python/tdbdump.py b/python/tdbdump.py
new file mode 100644
index 0000000..01859eb
--- /dev/null
+++ b/python/tdbdump.py
@@ -0,0 +1,12 @@
+#!/usr/bin/env python
+# Trivial reimplementation of tdbdump in Python
+
+import tdb, sys
+
+if len(sys.argv) < 2:
+ print "Usage: tdbdump.py <tdb-file>"
+ sys.exit(1)
+
+db = tdb.Tdb(sys.argv[1])
+# Iterating a Tdb yields its keys; fetch each value via the mapping protocol.
+for k in db:
+    v = db[k]
+    print "{\nkey(%d) = %r\ndata(%d) = %r\n}" % (len(k), k, len(v), v)
diff --git a/python/tests/simple.py b/python/tests/simple.py
new file mode 100644
index 0000000..4751f9b
--- /dev/null
+++ b/python/tests/simple.py
@@ -0,0 +1,183 @@
+#!/usr/bin/env python
+# Some simple tests for the Python bindings for TDB
+# Note that this tests the interface of the Python bindings
+# It does not test tdb itself.
+#
+# Copyright (C) 2007-2008 Jelmer Vernooij <jelmer@samba.org>
+# Published under the GNU LGPLv3 or later
+
+import tdb
+from unittest import TestCase
+import os, tempfile
+
+
+class OpenTdbTests(TestCase):
+
+ def test_nonexistent_read(self):
+ self.assertRaises(IOError, tdb.Tdb, "/some/nonexistent/file", 0,
+ tdb.DEFAULT, os.O_RDWR)
+
+class CloseTdbTests(TestCase):
+
+ def test_double_close(self):
+ self.tdb = tdb.Tdb(tempfile.mkstemp()[1], 0, tdb.DEFAULT,
+ os.O_CREAT|os.O_RDWR)
+ self.assertNotEqual(None, self.tdb)
+
+ # ensure that double close does not crash python
+ self.tdb.close()
+ self.tdb.close()
+
+ # Check that further operations do not crash python
+ self.assertRaises(RuntimeError, lambda: self.tdb.transaction_start())
+
+ self.assertRaises(RuntimeError, lambda: self.tdb["bar"])
+
+
+class InternalTdbTests(TestCase):
+
+ def test_repr(self):
+ self.tdb = tdb.Tdb()
+
+ # repr used to crash on internal db
+ self.assertEquals(repr(self.tdb), "Tdb(<internal>)")
+
+
+class SimpleTdbTests(TestCase):
+
+ def setUp(self):
+ super(SimpleTdbTests, self).setUp()
+ self.tdb = tdb.Tdb(tempfile.mkstemp()[1], 0, tdb.DEFAULT,
+ os.O_CREAT|os.O_RDWR)
+ self.assertNotEqual(None, self.tdb)
+
+ def tearDown(self):
+ del self.tdb
+
+ def test_repr(self):
+ self.assertTrue(repr(self.tdb).startswith("Tdb('"))
+
+ def test_lockall(self):
+ self.tdb.lock_all()
+
+ def test_max_dead(self):
+ self.tdb.max_dead = 20
+
+ def test_unlockall(self):
+ self.tdb.lock_all()
+ self.tdb.unlock_all()
+
+ def test_lockall_read(self):
+ self.tdb.read_lock_all()
+ self.tdb.read_unlock_all()
+
+ def test_reopen(self):
+ self.tdb.reopen()
+
+ def test_store(self):
+ self.tdb.store("bar", "bla")
+ self.assertEquals("bla", self.tdb.get("bar"))
+
+ def test_getitem(self):
+ self.tdb["bar"] = "foo"
+ self.tdb.reopen()
+ self.assertEquals("foo", self.tdb["bar"])
+
+ def test_delete(self):
+ self.tdb["bar"] = "foo"
+ del self.tdb["bar"]
+ self.assertRaises(KeyError, lambda: self.tdb["bar"])
+
+ def test_contains(self):
+ self.tdb["bla"] = "bloe"
+ self.assertTrue("bla" in self.tdb)
+
+ def test_keyerror(self):
+ self.assertRaises(KeyError, lambda: self.tdb["bla"])
+
+ def test_hash_size(self):
+ self.tdb.hash_size
+
+ def test_map_size(self):
+ self.tdb.map_size
+
+ def test_freelist_size(self):
+ self.tdb.freelist_size
+
+ def test_name(self):
+ self.tdb.filename
+
+ def test_iterator(self):
+ self.tdb["bla"] = "1"
+ self.tdb["brainslug"] = "2"
+ l = list(self.tdb)
+ l.sort()
+ self.assertEquals(["bla", "brainslug"], l)
+
+ def test_transaction_cancel(self):
+ self.tdb["bloe"] = "2"
+ self.tdb.transaction_start()
+ self.tdb["bloe"] = "1"
+ self.tdb.transaction_cancel()
+ self.assertEquals("2", self.tdb["bloe"])
+
+ def test_transaction_commit(self):
+ self.tdb["bloe"] = "2"
+ self.tdb.transaction_start()
+ self.tdb["bloe"] = "1"
+ self.tdb.transaction_commit()
+ self.assertEquals("1", self.tdb["bloe"])
+
+ def test_transaction_prepare_commit(self):
+ self.tdb["bloe"] = "2"
+ self.tdb.transaction_start()
+ self.tdb["bloe"] = "1"
+ self.tdb.transaction_prepare_commit()
+ self.tdb.transaction_commit()
+ self.assertEquals("1", self.tdb["bloe"])
+
+ def test_iterkeys(self):
+ self.tdb["bloe"] = "2"
+ self.tdb["bla"] = "25"
+ i = self.tdb.iterkeys()
+ self.assertEquals(set(["bloe", "bla"]), set([i.next(), i.next()]))
+
+ def test_clear(self):
+ self.tdb["bloe"] = "2"
+ self.tdb["bla"] = "25"
+ self.assertEquals(2, len(list(self.tdb)))
+ self.tdb.clear()
+ self.assertEquals(0, len(list(self.tdb)))
+
+ def test_repack(self):
+ self.tdb["foo"] = "abc"
+ self.tdb["bar"] = "def"
+ del self.tdb["foo"]
+ self.tdb.repack()
+
+ def test_seqnum(self):
+ self.tdb.enable_seqnum()
+ seq1 = self.tdb.seqnum
+ self.tdb.increment_seqnum_nonblock()
+ seq2 = self.tdb.seqnum
+ self.assertEquals(seq2-seq1, 1)
+
+ def test_len(self):
+ self.assertEquals(0, len(list(self.tdb)))
+ self.tdb["entry"] = "value"
+ self.assertEquals(1, len(list(self.tdb)))
+
+ def test_add_flags(self):
+ self.tdb.add_flags(tdb.NOMMAP)
+ self.tdb.remove_flags(tdb.NOMMAP)
+
+
+class VersionTests(TestCase):
+
+ def test_present(self):
+ self.assertTrue(isinstance(tdb.__version__, str))
+
+
+if __name__ == '__main__':
+ import unittest
+ unittest.TestProgram()
diff --git a/tdb.pc.in b/tdb.pc.in
new file mode 100644
index 0000000..b78419e
--- /dev/null
+++ b/tdb.pc.in
@@ -0,0 +1,11 @@
+prefix=@prefix@
+exec_prefix=@exec_prefix@
+libdir=@libdir@
+includedir=@includedir@
+
+Name: tdb
+Description: A trivial database
+Version: @PACKAGE_VERSION@
+Libs: @LIB_RPATH@ -L${libdir} -ltdb
+Cflags: -I${includedir}
+URL: http://tdb.samba.org/
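
Once installed as tdb.pc, consumers can resolve the build flags in the
usual way, e.g. "pkg-config --cflags --libs tdb".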
diff --git a/test/external-agent.c b/test/external-agent.c
new file mode 100644
index 0000000..443d382
--- /dev/null
+++ b/test/external-agent.c
@@ -0,0 +1,223 @@
+#include "external-agent.h"
+#include "lock-tracking.h"
+#include "logging.h"
+#include <sys/types.h>
+#include <sys/wait.h>
+#include <unistd.h>
+#include <fcntl.h>
+#include <stdlib.h>
+#include <limits.h>
+#include <string.h>
+#include <errno.h>
+#include "../common/tdb_private.h"
+#include "tap-interface.h"
+#include <stdio.h>
+#include <stdarg.h>
+
+static struct tdb_context *tdb;
+
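+/*
+ * Execute one operation in the agent child process. Commands arrive
+ * over a pipe as a single opcode byte followed by a NUL-terminated
+ * name (see external_agent_operation() below); the result is written
+ * back as an enum agent_return.
+ */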
+static enum agent_return do_operation(enum operation op, const char *name)
+{
+ TDB_DATA k;
+ enum agent_return ret;
+ TDB_DATA data;
+
+ if (op != OPEN && op != OPEN_WITH_CLEAR_IF_FIRST && !tdb) {
+ diag("external: No tdb open!");
+ return OTHER_FAILURE;
+ }
+
+ k.dptr = discard_const_p(uint8_t, name);
+ k.dsize = strlen(name);
+
+ locking_would_block = 0;
+ switch (op) {
+ case OPEN:
+ if (tdb) {
+ diag("Already have tdb %s open", tdb_name(tdb));
+ return OTHER_FAILURE;
+ }
+ tdb = tdb_open_ex(name, 0, TDB_DEFAULT, O_RDWR, 0,
+ &taplogctx, NULL);
+ if (!tdb) {
+ if (!locking_would_block)
+ diag("Opening tdb gave %s", strerror(errno));
+ ret = OTHER_FAILURE;
+ } else
+ ret = SUCCESS;
+ break;
+ case OPEN_WITH_CLEAR_IF_FIRST:
+ if (tdb)
+ return OTHER_FAILURE;
+ tdb = tdb_open_ex(name, 0, TDB_CLEAR_IF_FIRST, O_RDWR, 0,
+ &taplogctx, NULL);
+ ret = tdb ? SUCCESS : OTHER_FAILURE;
+ break;
+ case TRANSACTION_START:
+ ret = tdb_transaction_start(tdb) == 0 ? SUCCESS : OTHER_FAILURE;
+ break;
+ case FETCH:
+ data = tdb_fetch(tdb, k);
+ if (data.dptr == NULL) {
+ if (tdb_error(tdb) == TDB_ERR_NOEXIST)
+ ret = FAILED;
+ else
+ ret = OTHER_FAILURE;
+ } else if (data.dsize != k.dsize
+ || memcmp(data.dptr, k.dptr, k.dsize) != 0) {
+ ret = OTHER_FAILURE;
+ } else {
+ ret = SUCCESS;
+ }
+ free(data.dptr);
+ break;
+ case STORE:
+ ret = tdb_store(tdb, k, k, 0) == 0 ? SUCCESS : OTHER_FAILURE;
+ break;
+ case TRANSACTION_COMMIT:
+ ret = tdb_transaction_commit(tdb)==0 ? SUCCESS : OTHER_FAILURE;
+ break;
+ case CHECK:
+ ret = tdb_check(tdb, NULL, NULL) == 0 ? SUCCESS : OTHER_FAILURE;
+ break;
+ case NEEDS_RECOVERY:
+ ret = tdb_needs_recovery(tdb) ? SUCCESS : FAILED;
+ break;
+ case CLOSE:
+ ret = tdb_close(tdb) == 0 ? SUCCESS : OTHER_FAILURE;
+ tdb = NULL;
+ break;
+ case PING:
+ ret = SUCCESS;
+ break;
+ case UNMAP:
+ ret = tdb_munmap(tdb) == 0 ? SUCCESS : OTHER_FAILURE;
+ if (ret == SUCCESS) {
+ tdb->flags |= TDB_NOMMAP;
+ }
+ break;
+ default:
+ ret = OTHER_FAILURE;
+ }
+
+ if (locking_would_block)
+ ret = WOULD_HAVE_BLOCKED;
+
+ return ret;
+}
+
+struct agent {
+ int cmdfd, responsefd;
+ pid_t pid;
+};
+
+/* Do this before doing any tdb stuff. Return handle, or NULL. */
+struct agent *prepare_external_agent(void)
+{
+ int ret;
+ int command[2], response[2];
+ char name[1+PATH_MAX];
+ struct agent *agent = malloc(sizeof(*agent));
+
+ if (pipe(command) != 0 || pipe(response) != 0) {
+ fprintf(stderr, "pipe failed: %s\n", strerror(errno));
+ exit(1);
+ }
+
+ agent->pid = fork();
+ if (agent->pid < 0) {
+ fprintf(stderr, "fork failed: %s\n", strerror(errno));
+ exit(1);
+ }
+
+ if (agent->pid != 0) {
+ close(command[0]);
+ close(response[1]);
+ agent->cmdfd = command[1];
+ agent->responsefd = response[0];
+ return agent;
+ }
+
+ close(command[1]);
+ close(response[0]);
+
+ /* We want to fail, not block. */
+ nonblocking_locks = true;
+ log_prefix = "external: ";
+ while ((ret = read(command[0], name, sizeof(name))) > 0) {
+ enum agent_return result;
+
+ result = do_operation(name[0], name+1);
+ if (write(response[1], &result, sizeof(result))
+ != sizeof(result))
+ abort();
+ }
+ exit(0);
+}
+
+void shutdown_agent(struct agent *agent)
+{
+ pid_t p;
+
+ close(agent->cmdfd);
+ close(agent->responsefd);
+ p = waitpid(agent->pid, NULL, WNOHANG);
+ if (p == 0) {
+ kill(agent->pid, SIGKILL);
+ }
+ waitpid(agent->pid, NULL, 0);
+ free(agent);
+}
+
+/* Ask the external agent to try to do an operation. */
+enum agent_return external_agent_operation(struct agent *agent,
+ enum operation op,
+ const char *name)
+{
+ enum agent_return res;
+ unsigned int len;
+ char *string;
+
+ if (!name)
+ name = "";
+ len = 1 + strlen(name) + 1;
+ string = malloc(len);
+
+ string[0] = op;
+ strcpy(string+1, name);
+
+ if (write(agent->cmdfd, string, len) != len
+ || read(agent->responsefd, &res, sizeof(res)) != sizeof(res))
+ res = AGENT_DIED;
+
+ free(string);
+ return res;
+}
+
+const char *agent_return_name(enum agent_return ret)
+{
+ return ret == SUCCESS ? "SUCCESS"
+ : ret == WOULD_HAVE_BLOCKED ? "WOULD_HAVE_BLOCKED"
+ : ret == AGENT_DIED ? "AGENT_DIED"
+ : ret == FAILED ? "FAILED"
+ : ret == OTHER_FAILURE ? "OTHER_FAILURE"
+ : "**INVALID**";
+}
+
+const char *operation_name(enum operation op)
+{
+ switch (op) {
+ case OPEN: return "OPEN";
+ case OPEN_WITH_CLEAR_IF_FIRST: return "OPEN_WITH_CLEAR_IF_FIRST";
+ case TRANSACTION_START: return "TRANSACTION_START";
+ case FETCH: return "FETCH";
+ case STORE: return "STORE";
+ case TRANSACTION_COMMIT: return "TRANSACTION_COMMIT";
+ case CHECK: return "CHECK";
+ case NEEDS_RECOVERY: return "NEEDS_RECOVERY";
+ case CLOSE: return "CLOSE";
+ case PING: return "PING";
+ case UNMAP: return "UNMAP";
+ }
+ return "**INVALID**";
+}
diff --git a/test/external-agent.h b/test/external-agent.h
new file mode 100644
index 0000000..de9d0ac
--- /dev/null
+++ b/test/external-agent.h
@@ -0,0 +1,44 @@
+#ifndef TDB_TEST_EXTERNAL_AGENT_H
+#define TDB_TEST_EXTERNAL_AGENT_H
+
+/* For locking tests, we need a different process to try things at
+ * various times. */
+enum operation {
+ OPEN,
+ OPEN_WITH_CLEAR_IF_FIRST,
+ TRANSACTION_START,
+ FETCH,
+ STORE,
+ TRANSACTION_COMMIT,
+ CHECK,
+ NEEDS_RECOVERY,
+ CLOSE,
+ PING,
+ UNMAP,
+};
+
+/* Do this before doing any tdb stuff. Return handle, or NULL. */
+struct agent *prepare_external_agent(void);
+void shutdown_agent(struct agent *agent);
+
+enum agent_return {
+ SUCCESS,
+ WOULD_HAVE_BLOCKED,
+ AGENT_DIED,
+ FAILED, /* For fetch, or NEEDS_RECOVERY */
+ OTHER_FAILURE,
+};
+
+/* Ask the external agent to try to do an operation.
+ * name == tdb name for OPEN/OPEN_WITH_CLEAR_IF_FIRST,
+ * record name for FETCH/STORE (store stores name as data too)
+ */
+enum agent_return external_agent_operation(struct agent *handle,
+ enum operation op,
+ const char *name);
+
+/* Mapping enum -> string. */
+const char *agent_return_name(enum agent_return ret);
+const char *operation_name(enum operation op);
+
+#endif /* TDB_TEST_EXTERNAL_AGENT_H */
diff --git a/test/jenkins-be-hash.tdb b/test/jenkins-be-hash.tdb
new file mode 100644
index 0000000..b652840
--- /dev/null
+++ b/test/jenkins-be-hash.tdb
Binary files differ
diff --git a/test/jenkins-le-hash.tdb b/test/jenkins-le-hash.tdb
new file mode 100644
index 0000000..007e0a3
--- /dev/null
+++ b/test/jenkins-le-hash.tdb
Binary files differ
diff --git a/test/lock-tracking.c b/test/lock-tracking.c
new file mode 100644
index 0000000..b2f092c
--- /dev/null
+++ b/test/lock-tracking.c
@@ -0,0 +1,157 @@
+/* We save the locks so we can reacquire them. */
+#include "../common/tdb_private.h"
+#include <unistd.h>
+#include <fcntl.h>
+#include <stdarg.h>
+#include <stdlib.h>
+#include "tap-interface.h"
+#include "lock-tracking.h"
+
+struct testlock {
+ struct testlock *next;
+ unsigned int off;
+ unsigned int len;
+ int type;
+};
+static struct testlock *testlocks;
+int locking_errors = 0;
+bool suppress_lockcheck = false;
+bool nonblocking_locks;
+int locking_would_block = 0;
+void (*unlock_callback)(int fd);
+
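+/*
+ * fcntl() wrapper used by the tests: records every byte-range lock
+ * taken or released so that overlapping locks and unmatched unlocks
+ * can be flagged, and optionally converts blocking F_SETLKW requests
+ * into non-blocking F_SETLK ones (nonblocking_locks).
+ */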
+int fcntl_with_lockcheck(int fd, int cmd, ... /* arg */ )
+{
+ va_list ap;
+ int ret, arg3;
+ struct flock *fl;
+ bool may_block = false;
+
+ if (cmd != F_SETLK && cmd != F_SETLKW) {
+ /* This may be totally bogus, but we don't know in general. */
+ va_start(ap, cmd);
+ arg3 = va_arg(ap, int);
+ va_end(ap);
+
+ return fcntl(fd, cmd, arg3);
+ }
+
+ va_start(ap, cmd);
+ fl = va_arg(ap, struct flock *);
+ va_end(ap);
+
+ if (cmd == F_SETLKW && nonblocking_locks) {
+ cmd = F_SETLK;
+ may_block = true;
+ }
+ ret = fcntl(fd, cmd, fl);
+
+ /* Detect when we failed, but might have been OK if we waited. */
+ if (may_block && ret == -1 && (errno == EAGAIN || errno == EACCES)) {
+ locking_would_block++;
+ }
+
+ if (fl->l_type == F_UNLCK) {
+ struct testlock **l;
+ struct testlock *old = NULL;
+
+ for (l = &testlocks; *l; l = &(*l)->next) {
+ if ((*l)->off == fl->l_start
+ && (*l)->len == fl->l_len) {
+ if (ret == 0) {
+ old = *l;
+ *l = (*l)->next;
+ free(old);
+ }
+ break;
+ }
+ if (((*l)->off == fl->l_start)
+ && ((*l)->len == 0)
+ && (ret == 0)) {
+ /*
+ * Remove a piece from the start of the
+ * allrecord_lock
+ */
+ old = *l;
+ (*l)->off += fl->l_len;
+ break;
+ }
+ }
+ if (!old && !suppress_lockcheck) {
+ diag("Unknown unlock %u@%u - %i",
+ (int)fl->l_len, (int)fl->l_start, ret);
+ locking_errors++;
+ }
+ } else {
+ struct testlock *new, *i;
+ unsigned int fl_end = fl->l_start + fl->l_len;
+ if (fl->l_len == 0)
+ fl_end = (unsigned int)-1;
+
+ /* Check for overlaps: we shouldn't do this. */
+ for (i = testlocks; i; i = i->next) {
+ unsigned int i_end = i->off + i->len;
+ if (i->len == 0)
+ i_end = (unsigned int)-1;
+
+ if (fl->l_start >= i->off && fl->l_start < i_end)
+ break;
+ if (fl_end >= i->off && fl_end < i_end)
+ break;
+
+ /* tdb_allrecord_lock does this, handle adjacent: */
+ if (fl->l_start == i_end && fl->l_type == i->type) {
+ if (ret == 0) {
+ i->len = fl->l_len
+ ? i->len + fl->l_len
+ : 0;
+ }
+ goto done;
+ }
+ }
+ if (i) {
+ /* Special case: upgrade of allrecord lock. */
+ if (i->type == F_RDLCK && fl->l_type == F_WRLCK
+ && i->off == FREELIST_TOP
+ && fl->l_start == FREELIST_TOP
+ && i->len == 0
+ && fl->l_len == 0) {
+ if (ret == 0)
+ i->type = F_WRLCK;
+ goto done;
+ }
+ if (!suppress_lockcheck) {
+ diag("%s testlock %u@%u overlaps %u@%u",
+ fl->l_type == F_WRLCK ? "write" : "read",
+ (int)fl->l_len, (int)fl->l_start,
+ i->len, (int)i->off);
+ locking_errors++;
+ }
+ }
+
+ if (ret == 0) {
+ new = malloc(sizeof *new);
+ new->off = fl->l_start;
+ new->len = fl->l_len;
+ new->type = fl->l_type;
+ new->next = testlocks;
+ testlocks = new;
+ }
+ }
+done:
+ if (ret == 0 && fl->l_type == F_UNLCK && unlock_callback)
+ unlock_callback(fd);
+ return ret;
+}
+
+unsigned int forget_locking(void)
+{
+ unsigned int num = 0;
+ while (testlocks) {
+ struct testlock *next = testlocks->next;
+ free(testlocks);
+ testlocks = next;
+ num++;
+ }
+ return num;
+}
diff --git a/test/lock-tracking.h b/test/lock-tracking.h
new file mode 100644
index 0000000..f2c9c44
--- /dev/null
+++ b/test/lock-tracking.h
@@ -0,0 +1,25 @@
+#ifndef LOCK_TRACKING_H
+#define LOCK_TRACKING_H
+#include <stdbool.h>
+
+/* Set this if you want a callback after fcntl unlock. */
+extern void (*unlock_callback)(int fd);
+
+/* Replacement fcntl. */
+int fcntl_with_lockcheck(int fd, int cmd, ... /* arg */ );
+
+/* Discard locking info: returns number of locks outstanding. */
+unsigned int forget_locking(void);
+
+/* Number of errors in locking. */
+extern int locking_errors;
+
+/* Suppress lock checking. */
+extern bool suppress_lockcheck;
+
+/* Make all locks non-blocking. */
+extern bool nonblocking_locks;
+
+/* Number of times we failed a lock because we made it non-blocking. */
+extern int locking_would_block;
+#endif /* LOCK_TRACKING_H */
diff --git a/test/logging.c b/test/logging.c
new file mode 100644
index 0000000..dfab486
--- /dev/null
+++ b/test/logging.c
@@ -0,0 +1,33 @@
+#include "logging.h"
+#include "tap-interface.h"
+#include <stdarg.h>
+#include <stdlib.h>
+#include <string.h>
+#include <stdio.h>
+
+bool suppress_logging = false;
+const char *log_prefix = "";
+
+/* Turn log messages into tap diag messages. */
+static void taplog(struct tdb_context *tdb,
+ enum tdb_debug_level level,
+ const char *fmt, ...)
+{
+ va_list ap;
+ char line[200];
+
+ if (suppress_logging)
+ return;
+
+ va_start(ap, fmt);
+	vsnprintf(line, sizeof(line), fmt, ap);
+ va_end(ap);
+
+ /* Strip trailing \n: diag adds it. */
+ if (line[0] && line[strlen(line)-1] == '\n')
+ diag("%s%.*s", log_prefix, (unsigned)strlen(line)-1, line);
+ else
+ diag("%s%s", log_prefix, line);
+}
+
+struct tdb_logging_context taplogctx = { taplog, NULL };
diff --git a/test/logging.h b/test/logging.h
new file mode 100644
index 0000000..89e77b2
--- /dev/null
+++ b/test/logging.h
@@ -0,0 +1,11 @@
+#ifndef TDB_TEST_LOGGING_H
+#define TDB_TEST_LOGGING_H
+#include "replace.h"
+#include "../include/tdb.h"
+#include <stdbool.h>
+
+extern bool suppress_logging;
+extern const char *log_prefix;
+extern struct tdb_logging_context taplogctx;
+
+#endif /* TDB_TEST_LOGGING_H */
diff --git a/test/old-nohash-be.tdb b/test/old-nohash-be.tdb
new file mode 100644
index 0000000..1c49116
--- /dev/null
+++ b/test/old-nohash-be.tdb
Binary files differ
diff --git a/test/old-nohash-le.tdb b/test/old-nohash-le.tdb
new file mode 100644
index 0000000..0655072
--- /dev/null
+++ b/test/old-nohash-le.tdb
Binary files differ
diff --git a/test/run-3G-file.c b/test/run-3G-file.c
new file mode 100644
index 0000000..748c972
--- /dev/null
+++ b/test/run-3G-file.c
@@ -0,0 +1,145 @@
+#include "../common/tdb_private.h"
+#include "../common/io.c"
+#include "../common/tdb.c"
+#include "../common/lock.c"
+#include "../common/freelist.c"
+#include "../common/traverse.c"
+#include "../common/transaction.c"
+#include "../common/error.c"
+#include "../common/open.c"
+#include "../common/check.c"
+#include "../common/hash.c"
+#include "../common/mutex.c"
+#include "tap-interface.h"
+#include <stdlib.h>
+#include "logging.h"
+
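+/*
+ * Replacement for tdb's expand_file method: extend the file with
+ * ftruncate() rather than writing zeroes, so the ~3GB test file stays
+ * sparse. If ftruncate() fails, fall back to writing the final byte.
+ */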
+static int tdb_expand_file_sparse(struct tdb_context *tdb,
+ tdb_off_t size,
+ tdb_off_t addition)
+{
+ if (tdb->read_only || tdb->traverse_read) {
+ tdb->ecode = TDB_ERR_RDONLY;
+ return -1;
+ }
+
+ if (tdb_ftruncate(tdb, size+addition) == -1) {
+ char b = 0;
+ ssize_t written = tdb_pwrite(tdb, &b, 1, (size+addition) - 1);
+ if (written == 0) {
+ /* try once more, potentially revealing errno */
+ written = tdb_pwrite(tdb, &b, 1, (size+addition) - 1);
+ }
+ if (written == 0) {
+ /* again - give up, guessing errno */
+ errno = ENOSPC;
+ }
+ if (written != 1) {
+ TDB_LOG((tdb, TDB_DEBUG_FATAL, "expand_file to %d failed (%s)\n",
+ size+addition, strerror(errno)));
+ return -1;
+ }
+ }
+
+ return 0;
+}
+
+static const struct tdb_methods large_io_methods = {
+ tdb_read,
+ tdb_write,
+ tdb_next_hash_chain,
+ tdb_oob,
+ tdb_expand_file_sparse
+};
+
+static int test_traverse(struct tdb_context *tdb, TDB_DATA key, TDB_DATA data,
+ void *_data)
+{
+ TDB_DATA *expect = _data;
+ ok1(key.dsize == strlen("hi"));
+ ok1(memcmp(key.dptr, "hi", strlen("hi")) == 0);
+ ok1(data.dsize == expect->dsize);
+ ok1(memcmp(data.dptr, expect->dptr, data.dsize) == 0);
+ return 0;
+}
+
+int main(int argc, char *argv[])
+{
+ struct tdb_context *tdb;
+ TDB_DATA key, orig_data, data;
+ uint32_t hashval;
+ tdb_off_t rec_ptr;
+ struct tdb_record rec;
+ int ret;
+
+ plan_tests(24);
+ tdb = tdb_open_ex("run-36-file.tdb", 1024, TDB_CLEAR_IF_FIRST,
+ O_CREAT|O_TRUNC|O_RDWR, 0600, &taplogctx, NULL);
+
+ ok1(tdb);
+ tdb->methods = &large_io_methods;
+
+ key.dsize = strlen("hi");
+ key.dptr = discard_const_p(uint8_t, "hi");
+ orig_data.dsize = strlen("world");
+ orig_data.dptr = discard_const_p(uint8_t, "world");
+
+ /* Enlarge the file (internally multiplies by 2). */
+ ret = tdb_expand(tdb, 1500000000);
+#ifdef HAVE_INCOHERENT_MMAP
+ /* This can fail due to mmap failure on 32 bit systems. */
+ if (ret == -1) {
+ /* These should now fail. */
+ ok1(tdb_store(tdb, key, orig_data, TDB_INSERT) == -1);
+ data = tdb_fetch(tdb, key);
+ ok1(data.dptr == NULL);
+ ok1(tdb_traverse(tdb, test_traverse, &orig_data) == -1);
+ ok1(tdb_delete(tdb, key) == -1);
+ ok1(tdb_traverse(tdb, test_traverse, NULL) == -1);
+ /* Skip the rest... */
+ for (ret = 0; ret < 24 - 6; ret++)
+ ok1(1);
+ tdb_close(tdb);
+ return exit_status();
+ }
+#endif
+ ok1(ret == 0);
+
+ /* Put an entry in, and check it. */
+ ok1(tdb_store(tdb, key, orig_data, TDB_INSERT) == 0);
+
+ data = tdb_fetch(tdb, key);
+ ok1(data.dsize == strlen("world"));
+ ok1(memcmp(data.dptr, "world", strlen("world")) == 0);
+ free(data.dptr);
+
+	/* New records are currently allocated at the end of the file; check that. */
+ hashval = tdb->hash_fn(&key);
+ rec_ptr = tdb_find_lock_hash(tdb, key, hashval, F_RDLCK, &rec);
+ ok1(rec_ptr);
+ ok1(rec_ptr > 2U*1024*1024*1024);
+ tdb_unlock(tdb, BUCKET(rec.full_hash), F_RDLCK);
+
+ /* Traverse must work. */
+ ok1(tdb_traverse(tdb, test_traverse, &orig_data) == 1);
+
+ /* Delete should work. */
+ ok1(tdb_delete(tdb, key) == 0);
+
+ ok1(tdb_traverse(tdb, test_traverse, NULL) == 0);
+
+ /* Transactions should work. */
+ ok1(tdb_transaction_start(tdb) == 0);
+ ok1(tdb_store(tdb, key, orig_data, TDB_INSERT) == 0);
+
+ data = tdb_fetch(tdb, key);
+ ok1(data.dsize == strlen("world"));
+ ok1(memcmp(data.dptr, "world", strlen("world")) == 0);
+ free(data.dptr);
+ ok1(tdb_transaction_commit(tdb) == 0);
+
+ ok1(tdb_traverse(tdb, test_traverse, &orig_data) == 1);
+ tdb_close(tdb);
+
+ return exit_status();
+}
diff --git a/test/run-bad-tdb-header.c b/test/run-bad-tdb-header.c
new file mode 100644
index 0000000..9d29fdf
--- /dev/null
+++ b/test/run-bad-tdb-header.c
@@ -0,0 +1,59 @@
+#include "../common/tdb_private.h"
+#include "../common/io.c"
+#include "../common/tdb.c"
+#include "../common/lock.c"
+#include "../common/freelist.c"
+#include "../common/traverse.c"
+#include "../common/transaction.c"
+#include "../common/error.c"
+#include "../common/open.c"
+#include "../common/check.c"
+#include "../common/hash.c"
+#include "../common/mutex.c"
+#include "tap-interface.h"
+#include <stdlib.h>
+#include "logging.h"
+
+int main(int argc, char *argv[])
+{
+ struct tdb_context *tdb;
+ struct tdb_header hdr;
+ int fd;
+
+ plan_tests(11);
+ /* Can open fine if complete crap, as long as O_CREAT. */
+ fd = open("run-bad-tdb-header.tdb", O_RDWR|O_CREAT|O_TRUNC, 0600);
+ ok1(fd >= 0);
+ ok1(write(fd, "hello world", 11) == 11);
+ close(fd);
+ tdb = tdb_open_ex("run-bad-tdb-header.tdb", 1024, 0, O_RDWR, 0,
+ &taplogctx, NULL);
+ ok1(!tdb);
+ tdb = tdb_open_ex("run-bad-tdb-header.tdb", 1024, 0, O_CREAT|O_RDWR,
+ 0600, &taplogctx, NULL);
+ ok1(tdb);
+ tdb_close(tdb);
+
+ /* Now, with wrong version it should *not* overwrite. */
+ fd = open("run-bad-tdb-header.tdb", O_RDWR);
+ ok1(fd >= 0);
+ ok1(read(fd, &hdr, sizeof(hdr)) == sizeof(hdr));
+ ok1(hdr.version == TDB_VERSION);
+ hdr.version++;
+ lseek(fd, 0, SEEK_SET);
+ ok1(write(fd, &hdr, sizeof(hdr)) == sizeof(hdr));
+ close(fd);
+
+ tdb = tdb_open_ex("run-bad-tdb-header.tdb", 1024, 0, O_RDWR|O_CREAT,
+ 0600, &taplogctx, NULL);
+ ok1(errno == EIO);
+ ok1(!tdb);
+
+ /* With truncate, will be fine. */
+ tdb = tdb_open_ex("run-bad-tdb-header.tdb", 1024, 0,
+ O_RDWR|O_CREAT|O_TRUNC, 0600, &taplogctx, NULL);
+ ok1(tdb);
+ tdb_close(tdb);
+
+ return exit_status();
+}
diff --git a/test/run-check.c b/test/run-check.c
new file mode 100644
index 0000000..ce389a2
--- /dev/null
+++ b/test/run-check.c
@@ -0,0 +1,65 @@
+#include "../common/tdb_private.h"
+#include "../common/io.c"
+#include "../common/tdb.c"
+#include "../common/lock.c"
+#include "../common/freelist.c"
+#include "../common/traverse.c"
+#include "../common/transaction.c"
+#include "../common/error.c"
+#include "../common/open.c"
+#include "../common/check.c"
+#include "../common/hash.c"
+#include "../common/mutex.c"
+#include "tap-interface.h"
+#include <stdlib.h>
+#include "logging.h"
+
+int main(int argc, char *argv[])
+{
+ struct tdb_context *tdb;
+ TDB_DATA key, data;
+
+ plan_tests(13);
+ tdb = tdb_open_ex("run-check.tdb", 1, TDB_CLEAR_IF_FIRST,
+ O_CREAT|O_TRUNC|O_RDWR, 0600, &taplogctx, NULL);
+
+ ok1(tdb);
+ ok1(tdb_check(tdb, NULL, NULL) == 0);
+
+ key.dsize = strlen("hi");
+ key.dptr = discard_const_p(uint8_t, "hi");
+ data.dsize = strlen("world");
+ data.dptr = discard_const_p(uint8_t, "world");
+
+ ok1(tdb_store(tdb, key, data, TDB_INSERT) == 0);
+ ok1(tdb_check(tdb, NULL, NULL) == 0);
+ tdb_close(tdb);
+
+ tdb = tdb_open_ex("run-check.tdb", 1024, 0, O_RDWR, 0,
+ &taplogctx, NULL);
+ ok1(tdb);
+ ok1(tdb_check(tdb, NULL, NULL) == 0);
+ tdb_close(tdb);
+
+ tdb = tdb_open_ex("test/tdb.corrupt", 1024, 0, O_RDWR, 0,
+ &taplogctx, NULL);
+ ok1(tdb);
+ ok1(tdb_check(tdb, NULL, NULL) == -1);
+ ok1(tdb_error(tdb) == TDB_ERR_CORRUPT);
+ tdb_close(tdb);
+
+ /* Big and little endian should work! */
+ tdb = tdb_open_ex("test/old-nohash-le.tdb", 1024, 0, O_RDWR, 0,
+ &taplogctx, NULL);
+ ok1(tdb);
+ ok1(tdb_check(tdb, NULL, NULL) == 0);
+ tdb_close(tdb);
+
+ tdb = tdb_open_ex("test/old-nohash-be.tdb", 1024, 0, O_RDWR, 0,
+ &taplogctx, NULL);
+ ok1(tdb);
+ ok1(tdb_check(tdb, NULL, NULL) == 0);
+ tdb_close(tdb);
+
+ return exit_status();
+}
diff --git a/test/run-corrupt.c b/test/run-corrupt.c
new file mode 100644
index 0000000..e6fc751
--- /dev/null
+++ b/test/run-corrupt.c
@@ -0,0 +1,132 @@
+#include "../common/tdb_private.h"
+#include "../common/io.c"
+#include "../common/tdb.c"
+#include "../common/lock.c"
+#include "../common/freelist.c"
+#include "../common/traverse.c"
+#include "../common/transaction.c"
+#include "../common/error.c"
+#include "../common/open.c"
+#include "../common/check.c"
+#include "../common/hash.c"
+#include "../common/mutex.c"
+#include "tap-interface.h"
+#include <stdlib.h>
+#include "logging.h"
+
+static int check(TDB_DATA key, TDB_DATA data, void *private)
+{
+ unsigned int *sizes = private;
+
+ if (key.dsize > strlen("hello"))
+ return -1;
+ if (memcmp(key.dptr, "hello", key.dsize) != 0)
+ return -1;
+
+ if (data.dsize != strlen("world"))
+ return -1;
+ if (memcmp(data.dptr, "world", data.dsize) != 0)
+ return -1;
+
+ sizes[0] += key.dsize;
+ sizes[1] += data.dsize;
+ return 0;
+}
+
+static void tdb_flip_bit(struct tdb_context *tdb, unsigned int bit)
+{
+ unsigned int off = bit / CHAR_BIT;
+ unsigned char mask = (1 << (bit % CHAR_BIT));
+
+ if (tdb->map_ptr)
+ ((unsigned char *)tdb->map_ptr)[off] ^= mask;
+ else {
+ unsigned char c;
+ if (pread(tdb->fd, &c, 1, off) != 1) {
+ fprintf(stderr, "pread: %s\n", strerror(errno));
+ exit(1);
+ }
+ c ^= mask;
+ if (pwrite(tdb->fd, &c, 1, off) != 1) {
+ fprintf(stderr, "pwrite: %s\n", strerror(errno));
+ exit(1);
+ }
+ }
+}
+
+static void check_test(struct tdb_context *tdb)
+{
+ TDB_DATA key, data;
+ unsigned int i, verifiable, corrupt, sizes[2], dsize, ksize;
+
+ ok1(tdb_check(tdb, NULL, NULL) == 0);
+
+ key.dptr = discard_const_p(uint8_t, "hello");
+ data.dsize = strlen("world");
+ data.dptr = discard_const_p(uint8_t, "world");
+
+ /* Key and data size respectively. */
+ dsize = ksize = 0;
+
+ /* 5 keys in hash size 2 means we'll have multichains. */
+ for (key.dsize = 1; key.dsize <= 5; key.dsize++) {
+ ksize += key.dsize;
+ dsize += data.dsize;
+ if (tdb_store(tdb, key, data, TDB_INSERT) != 0)
+ abort();
+ }
+
+ /* This is how many bytes we expect to be verifiable. */
+ /* From the file header. */
+ verifiable = strlen(TDB_MAGIC_FOOD) + 1
+ + 2 * sizeof(uint32_t) + 2 * sizeof(tdb_off_t)
+ + 2 * sizeof(uint32_t);
+ /* From the free list chain and hash chains. */
+ verifiable += 3 * sizeof(tdb_off_t);
+ /* From the record headers & tailer */
+ verifiable += 5 * (sizeof(struct tdb_record) + sizeof(uint32_t));
+ /* The free block: we ignore datalen, keylen, full_hash. */
+ verifiable += sizeof(struct tdb_record) - 3*sizeof(uint32_t) +
+ sizeof(uint32_t);
+ /* Our check function verifies the key and data. */
+ verifiable += ksize + dsize;
+
+ /* Flip one bit at a time, make sure it detects verifiable bytes. */
+ for (i = 0, corrupt = 0; i < tdb->map_size * CHAR_BIT; i++) {
+ tdb_flip_bit(tdb, i);
+ memset(sizes, 0, sizeof(sizes));
+ if (tdb_check(tdb, check, sizes) != 0)
+ corrupt++;
+ else if (sizes[0] != ksize || sizes[1] != dsize)
+ corrupt++;
+ tdb_flip_bit(tdb, i);
+ }
+ ok(corrupt == verifiable * CHAR_BIT, "corrupt %u should be %u",
+ corrupt, verifiable * CHAR_BIT);
+}
+
+int main(int argc, char *argv[])
+{
+ struct tdb_context *tdb;
+
+ plan_tests(4);
+ /* This should use mmap. */
+ tdb = tdb_open_ex("run-corrupt.tdb", 2, TDB_CLEAR_IF_FIRST,
+ O_CREAT|O_TRUNC|O_RDWR, 0600, &taplogctx, NULL);
+
+ if (!tdb)
+ abort();
+ check_test(tdb);
+ tdb_close(tdb);
+
+ /* This should not. */
+ tdb = tdb_open_ex("run-corrupt.tdb", 2, TDB_CLEAR_IF_FIRST|TDB_NOMMAP,
+ O_CREAT|O_TRUNC|O_RDWR, 0600, &taplogctx, NULL);
+
+ if (!tdb)
+ abort();
+ check_test(tdb);
+ tdb_close(tdb);
+
+ return exit_status();
+}
diff --git a/test/run-die-during-transaction.c b/test/run-die-during-transaction.c
new file mode 100644
index 0000000..c636d87
--- /dev/null
+++ b/test/run-die-during-transaction.c
@@ -0,0 +1,232 @@
+#include "../common/tdb_private.h"
+#include "lock-tracking.h"
+static ssize_t pwrite_check(int fd, const void *buf, size_t count, off_t offset);
+static ssize_t write_check(int fd, const void *buf, size_t count);
+static int ftruncate_check(int fd, off_t length);
+
+#define pwrite pwrite_check
+#define write write_check
+#define fcntl fcntl_with_lockcheck
+#define ftruncate ftruncate_check
+
+#include "../common/io.c"
+#include "../common/tdb.c"
+#include "../common/lock.c"
+#include "../common/freelist.c"
+#include "../common/traverse.c"
+#include "../common/transaction.c"
+#include "../common/error.c"
+#include "../common/open.c"
+#include "../common/check.c"
+#include "../common/hash.c"
+#include "../common/mutex.c"
+#include "tap-interface.h"
+#include <stdlib.h>
+#include <stdbool.h>
+#include <stdarg.h>
+#include <setjmp.h>
+#include "external-agent.h"
+#include "logging.h"
+
+#undef write
+#undef pwrite
+#undef fcntl
+#undef ftruncate
+
+static bool in_transaction;
+static int target, current;
+static jmp_buf jmpbuf;
+#define TEST_DBNAME "run-die-during-transaction.tdb"
+#define KEY_STRING "helloworld"
+
+static void maybe_die(int fd)
+{
+ if (in_transaction && current++ == target) {
+ longjmp(jmpbuf, 1);
+ }
+}
+
+static ssize_t pwrite_check(int fd,
+ const void *buf, size_t count, off_t offset)
+{
+ ssize_t ret;
+
+ maybe_die(fd);
+
+ ret = pwrite(fd, buf, count, offset);
+ if (ret != count)
+ return ret;
+
+ maybe_die(fd);
+ return ret;
+}
+
+static ssize_t write_check(int fd, const void *buf, size_t count)
+{
+ ssize_t ret;
+
+ maybe_die(fd);
+
+ ret = write(fd, buf, count);
+ if (ret != count)
+ return ret;
+
+ maybe_die(fd);
+ return ret;
+}
+
+static int ftruncate_check(int fd, off_t length)
+{
+ int ret;
+
+ maybe_die(fd);
+
+ ret = ftruncate(fd, length);
+
+ maybe_die(fd);
+ return ret;
+}
+
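+/*
+ * Simulate a process dying mid-transaction: every intercepted write,
+ * pwrite, ftruncate or unlock may longjmp() back here once the global
+ * step counter reaches "target". After each simulated death the
+ * external agent checks that the database needs recovery, performs an
+ * operation (which triggers recovery), and re-checks integrity. The
+ * target is then advanced so death occurs one step later each run.
+ */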
+static bool test_death(enum operation op, struct agent *agent)
+{
+ struct tdb_context *tdb = NULL;
+ TDB_DATA key;
+ enum agent_return ret;
+ int needed_recovery = 0;
+
+ current = target = 0;
+reset:
+ unlink(TEST_DBNAME);
+ tdb = tdb_open_ex(TEST_DBNAME, 1024, TDB_NOMMAP,
+ O_CREAT|O_TRUNC|O_RDWR, 0600, &taplogctx, NULL);
+
+ if (setjmp(jmpbuf) != 0) {
+ /* We're partway through. Simulate our death. */
+ close(tdb->fd);
+ forget_locking();
+ in_transaction = false;
+
+ ret = external_agent_operation(agent, NEEDS_RECOVERY, "");
+ if (ret == SUCCESS)
+ needed_recovery++;
+ else if (ret != FAILED) {
+ diag("Step %u agent NEEDS_RECOVERY = %s", current,
+ agent_return_name(ret));
+ return false;
+ }
+
+ ret = external_agent_operation(agent, op, KEY_STRING);
+ if (ret != SUCCESS) {
+ diag("Step %u op %s failed = %s", current,
+ operation_name(op),
+ agent_return_name(ret));
+ return false;
+ }
+
+ ret = external_agent_operation(agent, NEEDS_RECOVERY, "");
+ if (ret != FAILED) {
+ diag("Still needs recovery after step %u = %s",
+ current, agent_return_name(ret));
+ return false;
+ }
+
+ ret = external_agent_operation(agent, CHECK, "");
+ if (ret != SUCCESS) {
+ diag("Step %u check failed = %s", current,
+ agent_return_name(ret));
+ return false;
+ }
+
+ ret = external_agent_operation(agent, CLOSE, "");
+ if (ret != SUCCESS) {
+ diag("Step %u close failed = %s", current,
+ agent_return_name(ret));
+ return false;
+ }
+
+ /* Suppress logging as this tries to use closed fd. */
+ suppress_logging = true;
+ suppress_lockcheck = true;
+ tdb_close(tdb);
+ suppress_logging = false;
+ suppress_lockcheck = false;
+ target++;
+ current = 0;
+ goto reset;
+ }
+
+ /* Put key for agent to fetch. */
+ key.dsize = strlen(KEY_STRING);
+ key.dptr = discard_const_p(uint8_t, KEY_STRING);
+ if (tdb_store(tdb, key, key, TDB_INSERT) != 0)
+ return false;
+
+ /* This is the key we insert in transaction. */
+ key.dsize--;
+
+ ret = external_agent_operation(agent, OPEN, TEST_DBNAME);
+ if (ret != SUCCESS) {
+ fprintf(stderr, "Agent failed to open: %s\n",
+ agent_return_name(ret));
+ exit(1);
+ }
+
+ ret = external_agent_operation(agent, FETCH, KEY_STRING);
+ if (ret != SUCCESS) {
+ fprintf(stderr, "Agent failed find key: %s\n",
+ agent_return_name(ret));
+ exit(1);
+ }
+
+ in_transaction = true;
+ if (tdb_transaction_start(tdb) != 0)
+ return false;
+
+ if (tdb_store(tdb, key, key, TDB_INSERT) != 0)
+ return false;
+
+ if (tdb_transaction_commit(tdb) != 0)
+ return false;
+
+ in_transaction = false;
+
+ /* We made it! */
+ diag("Completed %u runs", current);
+ tdb_close(tdb);
+ ret = external_agent_operation(agent, CLOSE, "");
+ if (ret != SUCCESS) {
+ diag("Step %u close failed = %s", current,
+ agent_return_name(ret));
+ return false;
+ }
+
+#ifdef HAVE_INCOHERENT_MMAP
+ /* This means we always mmap, which makes this test a noop. */
+ ok1(1);
+#else
+ ok1(needed_recovery);
+#endif
+ ok1(locking_errors == 0);
+ ok1(forget_locking() == 0);
+ locking_errors = 0;
+ return true;
+}
+
+int main(int argc, char *argv[])
+{
+ enum operation ops[] = { FETCH, STORE, TRANSACTION_START };
+ struct agent *agent;
+ int i;
+
+ plan_tests(12);
+ unlock_callback = maybe_die;
+
+ agent = prepare_external_agent();
+
+ for (i = 0; i < sizeof(ops)/sizeof(ops[0]); i++) {
+ diag("Testing %s after death", operation_name(ops[i]));
+ ok1(test_death(ops[i], agent));
+ }
+
+ return exit_status();
+}
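
The file above relies on a preprocessor interposition trick: pwrite(), write() and ftruncate() are #define'd to checking wrappers *before* the tdb sources are textually included, so every I/O call the library makes can longjmp() back into the test and simulate a crash at exactly that step. A minimal, self-contained sketch of the same trick, assuming nothing from tdb (count_write, demo and nwrites are illustrative names):

    #include <stdio.h>
    #include <unistd.h>

    static ssize_t count_write(int fd, const void *buf, size_t n);
    #define write count_write   /* divert write() in the included code */

    /* ... the code under test would be textually #include'd here ... */
    static void demo(void)
    {
            write(1, "hello\n", 6);     /* actually calls count_write() */
    }

    #undef write                /* the wrapper needs the real write() */

    static unsigned nwrites;

    static ssize_t count_write(int fd, const void *buf, size_t n)
    {
            nwrites++;          /* a test could longjmp() here instead */
            return write(fd, buf, n);
    }

    int main(void)
    {
            demo();
            fprintf(stderr, "intercepted %u write(s)\n", nwrites);
            return 0;
    }
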
diff --git a/test/run-endian.c b/test/run-endian.c
new file mode 100644
index 0000000..9d4d5f5
--- /dev/null
+++ b/test/run-endian.c
@@ -0,0 +1,64 @@
+#include "../common/tdb_private.h"
+#include "../common/io.c"
+#include "../common/tdb.c"
+#include "../common/lock.c"
+#include "../common/freelist.c"
+#include "../common/traverse.c"
+#include "../common/transaction.c"
+#include "../common/error.c"
+#include "../common/open.c"
+#include "../common/check.c"
+#include "../common/hash.c"
+#include "../common/mutex.c"
+#include "tap-interface.h"
+#include <stdlib.h>
+#include "logging.h"
+
+int main(int argc, char *argv[])
+{
+ struct tdb_context *tdb;
+ TDB_DATA key, data;
+
+ plan_tests(13);
+ tdb = tdb_open_ex("run-endian.tdb", 1024,
+ TDB_CLEAR_IF_FIRST|TDB_CONVERT,
+ O_CREAT|O_TRUNC|O_RDWR, 0600, &taplogctx, NULL);
+
+ ok1(tdb);
+ key.dsize = strlen("hi");
+ key.dptr = discard_const_p(uint8_t, "hi");
+ data.dsize = strlen("world");
+ data.dptr = discard_const_p(uint8_t, "world");
+
+ ok1(tdb_store(tdb, key, data, TDB_MODIFY) < 0);
+ ok1(tdb_error(tdb) == TDB_ERR_NOEXIST);
+ ok1(tdb_store(tdb, key, data, TDB_INSERT) == 0);
+ ok1(tdb_store(tdb, key, data, TDB_INSERT) < 0);
+ ok1(tdb_error(tdb) == TDB_ERR_EXISTS);
+ ok1(tdb_store(tdb, key, data, TDB_MODIFY) == 0);
+
+ data = tdb_fetch(tdb, key);
+ ok1(data.dsize == strlen("world"));
+ ok1(memcmp(data.dptr, "world", strlen("world")) == 0);
+ free(data.dptr);
+
+ key.dsize++;
+ data = tdb_fetch(tdb, key);
+ ok1(data.dptr == NULL);
+ tdb_close(tdb);
+
+ /* Reopen: should read it */
+ tdb = tdb_open_ex("run-endian.tdb", 1024, 0, O_RDWR, 0,
+ &taplogctx, NULL);
+ ok1(tdb);
+
+ key.dsize = strlen("hi");
+ key.dptr = discard_const_p(uint8_t, "hi");
+ data = tdb_fetch(tdb, key);
+ ok1(data.dsize == strlen("world"));
+ ok1(memcmp(data.dptr, "world", strlen("world")) == 0);
+ free(data.dptr);
+ tdb_close(tdb);
+
+ return exit_status();
+}
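
The test above creates the database with TDB_CONVERT (opposite byte order) and then reopens it without flags to prove the conversion is transparent. The underlying operation is a byte swap of each fixed-width header and record field; a hedged sketch of that idea for one 32-bit word (swap32 is our illustrative helper, not tdb's API):

    #include <stdint.h>
    #include <stdio.h>

    static uint32_t swap32(uint32_t x)
    {
            return (x << 24) |
                   ((x & 0x0000ff00u) << 8) |
                   ((x & 0x00ff0000u) >> 8) |
                   (x >> 24);
    }

    int main(void)
    {
            uint32_t v = 0x11223344u;
            printf("0x%08x <-> 0x%08x\n", v, swap32(v));
            return 0;
    }
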
diff --git a/test/run-incompatible.c b/test/run-incompatible.c
new file mode 100644
index 0000000..5f1b586
--- /dev/null
+++ b/test/run-incompatible.c
@@ -0,0 +1,188 @@
+#include "../common/tdb_private.h"
+#include "../common/io.c"
+#include "../common/tdb.c"
+#include "../common/lock.c"
+#include "../common/freelist.c"
+#include "../common/traverse.c"
+#include "../common/transaction.c"
+#include "../common/error.c"
+#include "../common/open.c"
+#include "../common/check.c"
+#include "../common/hash.c"
+#include "../common/mutex.c"
+#include "tap-interface.h"
+#include <stdlib.h>
+
+static unsigned int tdb_dumb_hash(TDB_DATA *key)
+{
+ return key->dsize;
+}
+
+static void log_fn(struct tdb_context *tdb, enum tdb_debug_level level, const char *fmt, ...)
+{
+ unsigned int *count = tdb_get_logging_private(tdb);
+ if (strstr(fmt, "hash"))
+ (*count)++;
+}
+
+static unsigned int hdr_rwlocks(const char *fname)
+{
+ struct tdb_header hdr;
+ ssize_t nread;
+
+ int fd = open(fname, O_RDONLY);
+ if (fd == -1)
+ return -1;
+
+ nread = read(fd, &hdr, sizeof(hdr));
+ close(fd);
+ if (nread != sizeof(hdr)) {
+ return -1;
+ }
+ return hdr.rwlocks;
+}
+
+int main(int argc, char *argv[])
+{
+ struct tdb_context *tdb;
+ unsigned int log_count, flags;
+ TDB_DATA d, r;
+ struct tdb_logging_context log_ctx = { log_fn, &log_count };
+
+ plan_tests(38 * 2);
+
+ for (flags = 0; flags <= TDB_CONVERT; flags += TDB_CONVERT) {
+ unsigned int rwmagic = TDB_HASH_RWLOCK_MAGIC;
+
+ if (flags & TDB_CONVERT)
+ tdb_convert(&rwmagic, sizeof(rwmagic));
+
+ /* Create an old-style hash. */
+ log_count = 0;
+ tdb = tdb_open_ex("run-incompatible.tdb", 0, flags,
+ O_CREAT|O_RDWR|O_TRUNC, 0600, &log_ctx,
+ NULL);
+ ok1(tdb);
+ ok1(log_count == 0);
+ d.dptr = discard_const_p(uint8_t, "Hello");
+ d.dsize = 5;
+ ok1(tdb_store(tdb, d, d, TDB_INSERT) == 0);
+ tdb_close(tdb);
+
+ /* Should not have marked rwlocks field. */
+ ok1(hdr_rwlocks("run-incompatible.tdb") == 0);
+
+ /* We can still open any old-style with incompat flag. */
+ log_count = 0;
+ tdb = tdb_open_ex("run-incompatible.tdb", 0,
+ TDB_INCOMPATIBLE_HASH,
+ O_RDWR, 0600, &log_ctx, NULL);
+ ok1(tdb);
+ ok1(log_count == 0);
+ r = tdb_fetch(tdb, d);
+ ok1(r.dsize == 5);
+ free(r.dptr);
+ ok1(tdb_check(tdb, NULL, NULL) == 0);
+ tdb_close(tdb);
+
+ log_count = 0;
+ tdb = tdb_open_ex("test/jenkins-le-hash.tdb", 0, 0, O_RDONLY,
+ 0, &log_ctx, tdb_jenkins_hash);
+ ok1(tdb);
+ ok1(log_count == 0);
+ ok1(tdb_check(tdb, NULL, NULL) == 0);
+ tdb_close(tdb);
+
+ log_count = 0;
+ tdb = tdb_open_ex("test/jenkins-be-hash.tdb", 0, 0, O_RDONLY,
+ 0, &log_ctx, tdb_jenkins_hash);
+ ok1(tdb);
+ ok1(log_count == 0);
+ ok1(tdb_check(tdb, NULL, NULL) == 0);
+ tdb_close(tdb);
+
+ /* OK, now create with incompatible flag, default hash. */
+ log_count = 0;
+ tdb = tdb_open_ex("run-incompatible.tdb", 0,
+ flags|TDB_INCOMPATIBLE_HASH,
+ O_CREAT|O_RDWR|O_TRUNC, 0600, &log_ctx,
+ NULL);
+ ok1(tdb);
+ ok1(log_count == 0);
+ d.dptr = discard_const_p(uint8_t, "Hello");
+ d.dsize = 5;
+ ok1(tdb_store(tdb, d, d, TDB_INSERT) == 0);
+ tdb_close(tdb);
+
+ /* Should have marked rwlocks field. */
+ ok1(hdr_rwlocks("run-incompatible.tdb") == rwmagic);
+
+ /* Cannot open with old hash. */
+ log_count = 0;
+ tdb = tdb_open_ex("run-incompatible.tdb", 0, 0,
+ O_RDWR, 0600, &log_ctx, tdb_old_hash);
+ ok1(!tdb);
+ ok1(log_count == 1);
+
+ /* Can open with jenkins hash. */
+ log_count = 0;
+ tdb = tdb_open_ex("run-incompatible.tdb", 0, 0,
+ O_RDWR, 0600, &log_ctx, tdb_jenkins_hash);
+ ok1(tdb);
+ ok1(log_count == 0);
+ r = tdb_fetch(tdb, d);
+ ok1(r.dsize == 5);
+ free(r.dptr);
+ ok1(tdb_check(tdb, NULL, NULL) == 0);
+ tdb_close(tdb);
+
+ /* Can open by letting it figure it out itself. */
+ log_count = 0;
+ tdb = tdb_open_ex("run-incompatible.tdb", 0, 0,
+ O_RDWR, 0600, &log_ctx, NULL);
+ ok1(tdb);
+ ok1(log_count == 0);
+ r = tdb_fetch(tdb, d);
+ ok1(r.dsize == 5);
+ free(r.dptr);
+ ok1(tdb_check(tdb, NULL, NULL) == 0);
+ tdb_close(tdb);
+
+ /* We can also use incompatible hash with other hashes. */
+ log_count = 0;
+ tdb = tdb_open_ex("run-incompatible.tdb", 0,
+ flags|TDB_INCOMPATIBLE_HASH,
+ O_CREAT|O_RDWR|O_TRUNC, 0600, &log_ctx,
+ tdb_dumb_hash);
+ ok1(tdb);
+ ok1(log_count == 0);
+ d.dptr = discard_const_p(uint8_t, "Hello");
+ d.dsize = 5;
+ ok1(tdb_store(tdb, d, d, TDB_INSERT) == 0);
+ tdb_close(tdb);
+
+ /* Should have marked rwlocks field. */
+ ok1(hdr_rwlocks("run-incompatible.tdb") == rwmagic);
+
+ /* It should not open if we don't specify. */
+ log_count = 0;
+ tdb = tdb_open_ex("run-incompatible.tdb", 0, 0, O_RDWR, 0,
+ &log_ctx, NULL);
+ ok1(!tdb);
+ ok1(log_count == 1);
+
+ /* Should reopen with correct hash. */
+ log_count = 0;
+ tdb = tdb_open_ex("run-incompatible.tdb", 0, 0, O_RDWR, 0,
+ &log_ctx, tdb_dumb_hash);
+ ok1(tdb);
+ ok1(log_count == 0);
+ r = tdb_fetch(tdb, d);
+ ok1(r.dsize == 5);
+ free(r.dptr);
+ ok1(tdb_check(tdb, NULL, NULL) == 0);
+ tdb_close(tdb);
+ }
+
+ return exit_status();
+}
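
hdr_rwlocks() above shows a useful testing idiom: rather than trusting the library's own view, the test reads the raw header straight off disk and asserts on the rwlocks magic directly. A stand-alone sketch of the same idiom under an invented header layout (struct toy_header and peek_rwlocks are ours; the real layout is struct tdb_header in tdb_private.h):

    #include <fcntl.h>
    #include <stdint.h>
    #include <stdio.h>
    #include <unistd.h>

    struct toy_header {             /* invented stand-in layout */
            char magic[32];
            uint32_t version;
            uint32_t rwlocks;       /* the field under test */
    };

    static int peek_rwlocks(const char *fname, uint32_t *out)
    {
            struct toy_header hdr;
            ssize_t nread;
            int fd = open(fname, O_RDONLY);

            if (fd == -1)
                    return -1;
            nread = read(fd, &hdr, sizeof(hdr));
            close(fd);
            if (nread != (ssize_t)sizeof(hdr))
                    return -1;
            *out = hdr.rwlocks;
            return 0;
    }

    int main(int argc, char *argv[])
    {
            uint32_t rw;

            if (argc > 1 && peek_rwlocks(argv[1], &rw) == 0)
                    printf("rwlocks field: 0x%08x\n", rw);
            return 0;
    }
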
diff --git a/test/run-marklock-deadlock.c b/test/run-marklock-deadlock.c
new file mode 100644
index 0000000..ff03a11
--- /dev/null
+++ b/test/run-marklock-deadlock.c
@@ -0,0 +1,278 @@
+#include "../common/tdb_private.h"
+#include "../common/io.c"
+#include "../common/tdb.c"
+#include "../common/lock.c"
+#include "../common/freelist.c"
+#include "../common/traverse.c"
+#include "../common/transaction.c"
+#include "../common/error.c"
+#include "../common/open.c"
+#include "../common/check.c"
+#include "../common/hash.c"
+#include "../common/mutex.c"
+#include "tap-interface.h"
+#include <stdlib.h>
+#include <sys/types.h>
+#include <sys/wait.h>
+#include <stdarg.h>
+#include "logging.h"
+
+static TDB_DATA key, data;
+
+static void do_chainlock(const char *name, int tdb_flags, int up, int down)
+{
+ struct tdb_context *tdb;
+ int ret;
+ ssize_t nread, nwritten;
+ char c = 0;
+
+ tdb = tdb_open_ex(name, 3, tdb_flags,
+ O_RDWR|O_CREAT, 0755, &taplogctx, NULL);
+ ok(tdb, "tdb_open_ex should succeed");
+
+ ret = tdb_chainlock(tdb, key);
+ ok(ret == 0, "tdb_chainlock should succeed");
+
+ nwritten = write(up, &c, sizeof(c));
+ ok(nwritten == sizeof(c), "write should succeed");
+
+ nread = read(down, &c, sizeof(c));
+ ok(nread == sizeof(c), "read should succeed");
+
+ exit(0);
+}
+
+static void do_allrecord_lock(const char *name, int tdb_flags, int up, int down)
+{
+ struct tdb_context *tdb;
+ int ret;
+ ssize_t nread, nwritten;
+ char c = 0;
+
+ tdb = tdb_open_ex(name, 3, tdb_flags,
+ O_RDWR|O_CREAT, 0755, &taplogctx, NULL);
+ ok(tdb, "tdb_open_ex should succeed");
+
+ ret = tdb_allrecord_lock(tdb, F_WRLCK, TDB_LOCK_WAIT, false);
+ ok(ret == 0, "tdb_allrecord_lock should succeed");
+
+ nwritten = write(up, &c, sizeof(c));
+ ok(nwritten == sizeof(c), "write should succeed");
+
+ nread = read(down, &c, sizeof(c));
+ ok(nread == sizeof(c), "read should succeed");
+
+ exit(0);
+}
+
+/* The code should barf on TDBs created with rwlocks. */
+static int do_tests(const char *name, int tdb_flags)
+{
+ struct tdb_context *tdb;
+ int ret;
+ pid_t chainlock_child, allrecord_child;
+ int chainlock_down[2];
+ int chainlock_up[2];
+ int allrecord_down[2];
+ int allrecord_up[2];
+ char c;
+ ssize_t nread, nwritten;
+
+ key.dsize = strlen("hi");
+ key.dptr = discard_const_p(uint8_t, "hi");
+ data.dsize = strlen("world");
+ data.dptr = discard_const_p(uint8_t, "world");
+
+ ret = pipe(chainlock_down);
+ ok(ret == 0, "pipe should succeed");
+
+ ret = pipe(chainlock_up);
+ ok(ret == 0, "pipe should succeed");
+
+ ret = pipe(allrecord_down);
+ ok(ret == 0, "pipe should succeed");
+
+ ret = pipe(allrecord_up);
+ ok(ret == 0, "pipe should succeed");
+
+ chainlock_child = fork();
+ ok(chainlock_child != -1, "fork should succeed");
+
+ if (chainlock_child == 0) {
+ close(chainlock_up[0]);
+ close(chainlock_down[1]);
+ close(allrecord_up[0]);
+ close(allrecord_up[1]);
+ close(allrecord_down[0]);
+ close(allrecord_down[1]);
+ do_chainlock(name, tdb_flags,
+ chainlock_up[1], chainlock_down[0]);
+ exit(0);
+ }
+ close(chainlock_up[1]);
+ close(chainlock_down[0]);
+
+ nread = read(chainlock_up[0], &c, sizeof(c));
+ ok(nread == sizeof(c), "read should succeed");
+
+ /*
+ * Now we have a process holding a chainlock. Start another process
+ * trying the allrecord lock. This will block.
+ */
+
+ allrecord_child = fork();
+ ok(allrecord_child != -1, "fork should succeed");
+
+ if (allrecord_child == 0) {
+ close(chainlock_up[0]);
+ close(chainlock_up[1]);
+ close(chainlock_down[0]);
+ close(chainlock_down[1]);
+ close(allrecord_up[0]);
+ close(allrecord_down[1]);
+ do_allrecord_lock(name, tdb_flags,
+ allrecord_up[1], allrecord_down[0]);
+ exit(0);
+ }
+ close(allrecord_up[1]);
+ close(allrecord_down[0]);
+
+ poll(NULL, 0, 500);
+
+ tdb = tdb_open_ex(name, 3, tdb_flags,
+ O_RDWR|O_CREAT, 0755, &taplogctx, NULL);
+ ok(tdb, "tdb_open_ex should succeed");
+
+ /*
+ * Someone already holds a chainlock, but we're able to get the
+ * freelist lock.
+ *
+ * The freelist lock/mutex is independent from the allrecord lock/mutex.
+ */
+
+ ret = tdb_chainlock_nonblock(tdb, key);
+ ok(ret == -1, "tdb_chainlock_nonblock should not succeed");
+
+ ret = tdb_lock_nonblock(tdb, -1, F_WRLCK);
+ ok(ret == 0, "tdb_lock_nonblock should succeed");
+
+ ret = tdb_unlock(tdb, -1, F_WRLCK);
+ ok(ret == 0, "tdb_unlock should succeed");
+
+ /*
+ * We have someone else having done the lock for us. Just mark it.
+ */
+
+ ret = tdb_chainlock_mark(tdb, key);
+ ok(ret == 0, "tdb_chainlock_mark should succeed");
+
+ /*
+ * The tdb_store below will block the freelist. In one version of the
+ * mutex patches, the freelist was already blocked here by the
+ * allrecord child, which was waiting for the chainlock child to give
+ * up its chainlock. Make sure that we don't run into this
+	 * deadlock. To exercise the deadlock, just comment out the "ok"
+ * line.
+ *
+ * The freelist lock/mutex is independent from the allrecord lock/mutex.
+ */
+
+ ret = tdb_lock_nonblock(tdb, -1, F_WRLCK);
+ ok(ret == 0, "tdb_lock_nonblock should succeed");
+
+ ret = tdb_unlock(tdb, -1, F_WRLCK);
+ ok(ret == 0, "tdb_unlock should succeed");
+
+ ret = tdb_store(tdb, key, data, TDB_INSERT);
+ ok(ret == 0, "tdb_store should succeed");
+
+ ret = tdb_chainlock_unmark(tdb, key);
+ ok(ret == 0, "tdb_chainlock_unmark should succeed");
+
+ nwritten = write(chainlock_down[1], &c, sizeof(c));
+ ok(nwritten == sizeof(c), "write should succeed");
+
+ nread = read(chainlock_up[0], &c, sizeof(c));
+	ok(nread == 0, "read should return EOF after child exit");
+
+ nread = read(allrecord_up[0], &c, sizeof(c));
+ ok(nread == sizeof(c), "read should succeed");
+
+ /*
+ * Someone already holds the allrecord lock, but we're able to get the
+ * freelist lock.
+ *
+ * The freelist lock/mutex is independent from the allrecord lock/mutex.
+ */
+
+ ret = tdb_chainlock_nonblock(tdb, key);
+ ok(ret == -1, "tdb_chainlock_nonblock should not succeed");
+
+ ret = tdb_lockall_nonblock(tdb);
+ ok(ret == -1, "tdb_lockall_nonblock should not succeed");
+
+ ret = tdb_lock_nonblock(tdb, -1, F_WRLCK);
+ ok(ret == 0, "tdb_lock_nonblock should succeed");
+
+ ret = tdb_unlock(tdb, -1, F_WRLCK);
+ ok(ret == 0, "tdb_unlock should succeed");
+
+ /*
+ * We have someone else having done the lock for us. Just mark it.
+ */
+
+ ret = tdb_lockall_mark(tdb);
+ ok(ret == 0, "tdb_lockall_mark should succeed");
+
+ ret = tdb_lock_nonblock(tdb, -1, F_WRLCK);
+ ok(ret == 0, "tdb_lock_nonblock should succeed");
+
+ ret = tdb_unlock(tdb, -1, F_WRLCK);
+ ok(ret == 0, "tdb_unlock should succeed");
+
+ ret = tdb_store(tdb, key, data, TDB_REPLACE);
+ ok(ret == 0, "tdb_store should succeed");
+
+ ret = tdb_lockall_unmark(tdb);
+ ok(ret == 0, "tdb_lockall_unmark should succeed");
+
+ nwritten = write(allrecord_down[1], &c, sizeof(c));
+ ok(nwritten == sizeof(c), "write should succeed");
+
+ nread = read(allrecord_up[0], &c, sizeof(c));
+	ok(nread == 0, "read should return EOF after child exit");
+
+ close(chainlock_up[0]);
+ close(chainlock_down[1]);
+ close(allrecord_up[0]);
+ close(allrecord_down[1]);
+ diag("%s tests done", name);
+ return exit_status();
+}
+
+int main(int argc, char *argv[])
+{
+ int ret;
+ bool mutex_support;
+
+ mutex_support = tdb_runtime_check_for_robust_mutexes();
+
+ ret = do_tests("marklock-deadlock-fcntl.tdb",
+ TDB_CLEAR_IF_FIRST |
+ TDB_INCOMPATIBLE_HASH);
+ ok(ret == 0, "marklock-deadlock-fcntl.tdb tests should succeed");
+
+ if (!mutex_support) {
+ skip(1, "No robust mutex support, "
+ "skipping marklock-deadlock-mutex.tdb tests");
+ return exit_status();
+ }
+
+ ret = do_tests("marklock-deadlock-mutex.tdb",
+ TDB_CLEAR_IF_FIRST |
+ TDB_MUTEX_LOCKING |
+ TDB_INCOMPATIBLE_HASH);
+ ok(ret == 0, "marklock-deadlock-mutex.tdb tests should succeed");
+
+ return exit_status();
+}
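
The up/down pipe pairs above implement a simple barrier: each child writes one byte once it holds its lock, then blocks on a read until the parent releases it, and the later zero-byte read doubles as an exit notification. The bare handshake, reduced to one child (all names ours, no tdb involved):

    #include <stdio.h>
    #include <sys/wait.h>
    #include <unistd.h>

    int main(void)
    {
            int up[2], down[2];
            char c = 0;

            if (pipe(up) != 0 || pipe(down) != 0)
                    return 1;

            if (fork() == 0) {
                    /* child: "I hold the lock", then wait for release */
                    write(up[1], &c, sizeof(c));
                    read(down[0], &c, sizeof(c));
                    _exit(0);
            }
            close(up[1]);
            close(down[0]);

            read(up[0], &c, sizeof(c));     /* child is ready */
            puts("child holds the resource; parent may now probe locks");
            write(down[1], &c, sizeof(c));  /* release the child */

            /* EOF confirms the child exited and closed its write end */
            if (read(up[0], &c, sizeof(c)) == 0)
                    puts("child gone");
            wait(NULL);
            return 0;
    }
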
diff --git a/test/run-mutex-allrecord-bench.c b/test/run-mutex-allrecord-bench.c
new file mode 100644
index 0000000..b81e597
--- /dev/null
+++ b/test/run-mutex-allrecord-bench.c
@@ -0,0 +1,82 @@
+#include "../common/tdb_private.h"
+#include "../common/io.c"
+#include "../common/tdb.c"
+#include "../common/lock.c"
+#include "../common/freelist.c"
+#include "../common/traverse.c"
+#include "../common/transaction.c"
+#include "../common/error.c"
+#include "../common/open.c"
+#include "../common/check.c"
+#include "../common/hash.c"
+#include "../common/mutex.c"
+#include "tap-interface.h"
+#include <stdlib.h>
+#include <sys/types.h>
+#include <sys/wait.h>
+#include <stdarg.h>
+
+static TDB_DATA key, data;
+
+static void log_fn(struct tdb_context *tdb, enum tdb_debug_level level,
+ const char *fmt, ...)
+{
+ va_list ap;
+ va_start(ap, fmt);
+ vfprintf(stderr, fmt, ap);
+ va_end(ap);
+}
+
+static double timeval_elapsed2(const struct timeval *tv1, const struct timeval *tv2)
+{
+ return (tv2->tv_sec - tv1->tv_sec) +
+ (tv2->tv_usec - tv1->tv_usec)*1.0e-6;
+}
+
+static double timeval_elapsed(const struct timeval *tv)
+{
+ struct timeval tv2;
+ gettimeofday(&tv2, NULL);
+ return timeval_elapsed2(tv, &tv2);
+}
+
+/* The code should barf on TDBs created with rwlocks. */
+int main(int argc, char *argv[])
+{
+ struct tdb_context *tdb;
+ unsigned int log_count;
+ struct tdb_logging_context log_ctx = { log_fn, &log_count };
+ int ret;
+ struct timeval start;
+ double elapsed;
+ bool runtime_support;
+
+ runtime_support = tdb_runtime_check_for_robust_mutexes();
+
+ if (!runtime_support) {
+ skip(1, "No robust mutex support");
+ return exit_status();
+ }
+
+ key.dsize = strlen("hi");
+ key.dptr = discard_const_p(uint8_t, "hi");
+ data.dsize = strlen("world");
+ data.dptr = discard_const_p(uint8_t, "world");
+
+ tdb = tdb_open_ex("mutex-allrecord-bench.tdb", 1000000,
+ TDB_INCOMPATIBLE_HASH|
+ TDB_MUTEX_LOCKING|
+ TDB_CLEAR_IF_FIRST,
+ O_RDWR|O_CREAT, 0755, &log_ctx, NULL);
+ ok(tdb, "tdb_open_ex should succeed");
+
+ gettimeofday(&start, NULL);
+ ret = tdb_allrecord_lock(tdb, F_WRLCK, TDB_LOCK_WAIT, false);
+ elapsed = timeval_elapsed(&start);
+
+ ok(ret == 0, "tdb_allrecord_lock should succeed");
+
+ diag("allrecord_lock took %f seconds", elapsed);
+
+ return exit_status();
+}
diff --git a/test/run-mutex-allrecord-block.c b/test/run-mutex-allrecord-block.c
new file mode 100644
index 0000000..fcd3b4f
--- /dev/null
+++ b/test/run-mutex-allrecord-block.c
@@ -0,0 +1,120 @@
+#include "../common/tdb_private.h"
+#include "../common/io.c"
+#include "../common/tdb.c"
+#include "../common/lock.c"
+#include "../common/freelist.c"
+#include "../common/traverse.c"
+#include "../common/transaction.c"
+#include "../common/error.c"
+#include "../common/open.c"
+#include "../common/check.c"
+#include "../common/hash.c"
+#include "../common/mutex.c"
+#include "tap-interface.h"
+#include <stdlib.h>
+#include <sys/types.h>
+#include <sys/wait.h>
+#include <stdarg.h>
+
+static TDB_DATA key, data;
+
+static void log_fn(struct tdb_context *tdb, enum tdb_debug_level level,
+ const char *fmt, ...)
+{
+ va_list ap;
+ va_start(ap, fmt);
+ vfprintf(stderr, fmt, ap);
+ va_end(ap);
+}
+
+static int do_child(int tdb_flags, int to, int from)
+{
+ struct tdb_context *tdb;
+ unsigned int log_count;
+ struct tdb_logging_context log_ctx = { log_fn, &log_count };
+ int ret;
+ char c = 0;
+
+ tdb = tdb_open_ex("mutex-allrecord-block.tdb", 3, tdb_flags,
+ O_RDWR|O_CREAT, 0755, &log_ctx, NULL);
+ ok(tdb, "tdb_open_ex should succeed");
+
+ ret = tdb_allrecord_lock(tdb, F_WRLCK, TDB_LOCK_WAIT, false);
+ ok(ret == 0, "tdb_allrecord_lock should succeed");
+
+ write(to, &c, sizeof(c));
+
+ read(from, &c, sizeof(c));
+
+ ret = tdb_allrecord_unlock(tdb, F_WRLCK, false);
+ ok(ret == 0, "tdb_allrecord_unlock should succeed");
+
+ return 0;
+}
+
+/* The code should barf on TDBs created with rwlocks. */
+int main(int argc, char *argv[])
+{
+ struct tdb_context *tdb;
+ unsigned int log_count;
+ struct tdb_logging_context log_ctx = { log_fn, &log_count };
+ int ret, status;
+ pid_t child, wait_ret;
+ int fromchild[2];
+ int tochild[2];
+ char c;
+ int tdb_flags;
+ bool runtime_support;
+
+ runtime_support = tdb_runtime_check_for_robust_mutexes();
+
+ if (!runtime_support) {
+ skip(1, "No robust mutex support");
+ return exit_status();
+ }
+
+ key.dsize = strlen("hi");
+ key.dptr = discard_const_p(uint8_t, "hi");
+ data.dsize = strlen("world");
+ data.dptr = discard_const_p(uint8_t, "world");
+
+ pipe(fromchild);
+ pipe(tochild);
+
+ tdb_flags = TDB_INCOMPATIBLE_HASH|
+ TDB_MUTEX_LOCKING|
+ TDB_CLEAR_IF_FIRST;
+
+ child = fork();
+ if (child == 0) {
+ close(fromchild[0]);
+ close(tochild[1]);
+ return do_child(tdb_flags, fromchild[1], tochild[0]);
+ }
+ close(fromchild[1]);
+ close(tochild[0]);
+
+ read(fromchild[0], &c, sizeof(c));
+
+ tdb = tdb_open_ex("mutex-allrecord-block.tdb", 0,
+ tdb_flags, O_RDWR|O_CREAT, 0755,
+ &log_ctx, NULL);
+ ok(tdb, "tdb_open_ex should succeed");
+
+ ret = tdb_chainlock_nonblock(tdb, key);
+ ok(ret == -1, "tdb_chainlock_nonblock should not succeed");
+
+ write(tochild[1], &c, sizeof(c));
+
+ ret = tdb_chainlock(tdb, key);
+ ok(ret == 0, "tdb_chainlock should not succeed");
+
+ ret = tdb_chainunlock(tdb, key);
+ ok(ret == 0, "tdb_chainunlock should succeed");
+
+ wait_ret = wait(&status);
+ ok(wait_ret == child, "child should have exited correctly");
+
+ diag("done");
+ return exit_status();
+}
diff --git a/test/run-mutex-allrecord-trylock.c b/test/run-mutex-allrecord-trylock.c
new file mode 100644
index 0000000..4b683db
--- /dev/null
+++ b/test/run-mutex-allrecord-trylock.c
@@ -0,0 +1,113 @@
+#include "../common/tdb_private.h"
+#include "../common/io.c"
+#include "../common/tdb.c"
+#include "../common/lock.c"
+#include "../common/freelist.c"
+#include "../common/traverse.c"
+#include "../common/transaction.c"
+#include "../common/error.c"
+#include "../common/open.c"
+#include "../common/check.c"
+#include "../common/hash.c"
+#include "../common/mutex.c"
+#include "tap-interface.h"
+#include <stdlib.h>
+#include <sys/types.h>
+#include <sys/wait.h>
+#include <stdarg.h>
+
+static TDB_DATA key, data;
+
+static void log_fn(struct tdb_context *tdb, enum tdb_debug_level level,
+ const char *fmt, ...)
+{
+ va_list ap;
+ va_start(ap, fmt);
+ vfprintf(stderr, fmt, ap);
+ va_end(ap);
+}
+
+static int do_child(int tdb_flags, int to, int from)
+{
+ struct tdb_context *tdb;
+ unsigned int log_count;
+ struct tdb_logging_context log_ctx = { log_fn, &log_count };
+ int ret;
+ char c = 0;
+
+ tdb = tdb_open_ex("mutex-allrecord-trylock.tdb", 3, tdb_flags,
+ O_RDWR|O_CREAT, 0755, &log_ctx, NULL);
+ ok(tdb, "tdb_open_ex should succeed");
+
+ ret = tdb_chainlock(tdb, key);
+ ok(ret == 0, "tdb_chainlock should succeed");
+
+ write(to, &c, sizeof(c));
+
+ read(from, &c, sizeof(c));
+
+ ret = tdb_chainunlock(tdb, key);
+ ok(ret == 0, "tdb_chainunlock should succeed");
+
+ return 0;
+}
+
+/* The code should barf on TDBs created with rwlocks. */
+int main(int argc, char *argv[])
+{
+ struct tdb_context *tdb;
+ unsigned int log_count;
+ struct tdb_logging_context log_ctx = { log_fn, &log_count };
+ int ret, status;
+ pid_t child, wait_ret;
+ int fromchild[2];
+ int tochild[2];
+ char c;
+ int tdb_flags;
+ bool runtime_support;
+
+ runtime_support = tdb_runtime_check_for_robust_mutexes();
+
+ if (!runtime_support) {
+ skip(1, "No robust mutex support");
+ return exit_status();
+ }
+
+ key.dsize = strlen("hi");
+ key.dptr = discard_const_p(uint8_t, "hi");
+ data.dsize = strlen("world");
+ data.dptr = discard_const_p(uint8_t, "world");
+
+ pipe(fromchild);
+ pipe(tochild);
+
+ tdb_flags = TDB_INCOMPATIBLE_HASH|
+ TDB_MUTEX_LOCKING|
+ TDB_CLEAR_IF_FIRST;
+
+ child = fork();
+ if (child == 0) {
+ close(fromchild[0]);
+ close(tochild[1]);
+ return do_child(tdb_flags, fromchild[1], tochild[0]);
+ }
+ close(fromchild[1]);
+ close(tochild[0]);
+
+ read(fromchild[0], &c, sizeof(c));
+
+ tdb = tdb_open_ex("mutex-allrecord-trylock.tdb", 0, tdb_flags,
+ O_RDWR|O_CREAT, 0755, &log_ctx, NULL);
+ ok(tdb, "tdb_open_ex should succeed");
+
+ ret = tdb_allrecord_lock(tdb, F_WRLCK, TDB_LOCK_NOWAIT, false);
+ ok(ret == -1, "tdb_allrecord_lock (nowait) should not succeed");
+
+ write(tochild[1], &c, sizeof(c));
+
+ wait_ret = wait(&status);
+ ok(wait_ret == child, "child should have exited correctly");
+
+ diag("done");
+ return exit_status();
+}
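
TDB_LOCK_NOWAIT above is the record-lock analogue of fcntl()'s F_SETLK: fail immediately with an error instead of queueing behind the current holder, which F_SETLKW (and TDB_LOCK_WAIT) would do. A hedged sketch of that distinction using plain POSIX record locks (file name is ours; in a single process the trylock always succeeds, the comments mark where contention would show):

    #include <fcntl.h>
    #include <stdio.h>
    #include <string.h>
    #include <unistd.h>

    int main(void)
    {
            struct flock fl;
            int fd = open("trylock-demo.lck", O_RDWR | O_CREAT, 0600);

            if (fd == -1)
                    return 1;

            memset(&fl, 0, sizeof(fl));
            fl.l_type = F_WRLCK;
            fl.l_whence = SEEK_SET;     /* l_start/l_len 0: whole file */

            /* F_SETLK: non-blocking, like TDB_LOCK_NOWAIT */
            if (fcntl(fd, F_SETLK, &fl) == -1)
                    perror("trylock failed (another process holds it)");
            else
                    puts("trylock succeeded");

            /* F_SETLKW would block here instead, like TDB_LOCK_WAIT */
            close(fd);
            return 0;
    }
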
diff --git a/test/run-mutex-die.c b/test/run-mutex-die.c
new file mode 100644
index 0000000..4b8eac1
--- /dev/null
+++ b/test/run-mutex-die.c
@@ -0,0 +1,269 @@
+#include "../common/tdb_private.h"
+#include "lock-tracking.h"
+static ssize_t pwrite_check(int fd, const void *buf, size_t count, off_t offset);
+static ssize_t write_check(int fd, const void *buf, size_t count);
+static int ftruncate_check(int fd, off_t length);
+
+#define pwrite pwrite_check
+#define write write_check
+#define fcntl fcntl_with_lockcheck
+#define ftruncate ftruncate_check
+
+#include "../common/io.c"
+#include "../common/tdb.c"
+#include "../common/lock.c"
+#include "../common/freelist.c"
+#include "../common/traverse.c"
+#include "../common/transaction.c"
+#include "../common/error.c"
+#include "../common/open.c"
+#include "../common/check.c"
+#include "../common/hash.c"
+#include "../common/mutex.c"
+#include "tap-interface.h"
+#include <stdlib.h>
+#include <stdbool.h>
+#include <stdarg.h>
+#include "external-agent.h"
+#include "logging.h"
+
+#undef write
+#undef pwrite
+#undef fcntl
+#undef ftruncate
+
+static int target, current;
+#define TEST_DBNAME "run-mutex-die.tdb"
+#define KEY_STRING "helloworld"
+
+static void maybe_die(int fd)
+{
+ if (target == 0) {
+ return;
+ }
+ current += 1;
+ if (current == target) {
+ _exit(1);
+ }
+}
+
+static ssize_t pwrite_check(int fd,
+ const void *buf, size_t count, off_t offset)
+{
+ ssize_t ret;
+
+ maybe_die(fd);
+
+ ret = pwrite(fd, buf, count, offset);
+ if (ret != count)
+ return ret;
+
+ maybe_die(fd);
+ return ret;
+}
+
+static ssize_t write_check(int fd, const void *buf, size_t count)
+{
+ ssize_t ret;
+
+ maybe_die(fd);
+
+ ret = write(fd, buf, count);
+ if (ret != count)
+ return ret;
+
+ maybe_die(fd);
+ return ret;
+}
+
+static int ftruncate_check(int fd, off_t length)
+{
+ int ret;
+
+ maybe_die(fd);
+
+ ret = ftruncate(fd, length);
+
+ maybe_die(fd);
+ return ret;
+}
+
+static enum agent_return flakey_ops(struct agent *a)
+{
+ enum agent_return ret;
+
+ /*
+ * Run in the external agent child
+ */
+
+ ret = external_agent_operation(a, OPEN_WITH_CLEAR_IF_FIRST, TEST_DBNAME);
+ if (ret != SUCCESS) {
+ fprintf(stderr, "Agent failed to open: %s\n",
+ agent_return_name(ret));
+ return ret;
+ }
+ ret = external_agent_operation(a, UNMAP, "");
+ if (ret != SUCCESS) {
+ fprintf(stderr, "Agent failed to unmap: %s\n",
+ agent_return_name(ret));
+ return ret;
+ }
+ ret = external_agent_operation(a, STORE, "xyz");
+ if (ret != SUCCESS) {
+ fprintf(stderr, "Agent failed to store: %s\n",
+ agent_return_name(ret));
+ return ret;
+ }
+ ret = external_agent_operation(a, STORE, KEY_STRING);
+ if (ret != SUCCESS) {
+ fprintf(stderr, "Agent failed store: %s\n",
+ agent_return_name(ret));
+ return ret;
+ }
+ ret = external_agent_operation(a, FETCH, KEY_STRING);
+ if (ret != SUCCESS) {
+ fprintf(stderr, "Agent failed find key: %s\n",
+ agent_return_name(ret));
+ return ret;
+ }
+ ret = external_agent_operation(a, PING, "");
+ if (ret != SUCCESS) {
+ fprintf(stderr, "Agent failed ping: %s\n",
+ agent_return_name(ret));
+ return ret;
+ }
+ return ret;
+}
+
+static bool prep_db(void) {
+ struct tdb_context *tdb;
+ TDB_DATA key;
+ TDB_DATA data;
+
+ key.dptr = discard_const_p(uint8_t, KEY_STRING);
+ key.dsize = strlen((char *)key.dptr);
+ data.dptr = discard_const_p(uint8_t, "foo");
+ data.dsize = strlen((char *)data.dptr);
+
+ unlink(TEST_DBNAME);
+
+ tdb = tdb_open_ex(
+ TEST_DBNAME, 2,
+ TDB_INCOMPATIBLE_HASH|TDB_MUTEX_LOCKING|TDB_CLEAR_IF_FIRST,
+ O_CREAT|O_TRUNC|O_RDWR, 0600, &taplogctx, NULL);
+ if (tdb == NULL) {
+ return false;
+ }
+
+ if (tdb_store(tdb, key, data, TDB_INSERT) != 0) {
+ return false;
+ }
+
+ tdb_close(tdb);
+ tdb = NULL;
+
+ forget_locking();
+
+ return true;
+}
+
+static bool test_db(void) {
+ struct tdb_context *tdb;
+ int ret;
+
+ tdb = tdb_open_ex(
+ TEST_DBNAME, 1024, TDB_INCOMPATIBLE_HASH,
+ O_RDWR, 0600, &taplogctx, NULL);
+
+ if (tdb == NULL) {
+ perror("tdb_open_ex failed");
+ return false;
+ }
+
+ ret = tdb_traverse(tdb, NULL, NULL);
+ if (ret == -1) {
+ perror("traverse failed");
+ goto fail;
+ }
+
+ tdb_close(tdb);
+
+ forget_locking();
+
+ return true;
+
+fail:
+ tdb_close(tdb);
+ return false;
+}
+
+static bool test_one(void)
+{
+ enum agent_return ret;
+
+ ret = AGENT_DIED;
+ target = 19;
+
+ while (ret != SUCCESS) {
+ struct agent *agent;
+
+ {
+ int child_target = target;
+ bool pret;
+ target = 0;
+ pret = prep_db();
+ ok1(pret);
+ target = child_target;
+ }
+
+ agent = prepare_external_agent();
+
+ ret = flakey_ops(agent);
+
+ diag("Agent (target=%d) returns %s",
+ target, agent_return_name(ret));
+
+ if (ret == SUCCESS) {
+ ok((target > 19), "At least one AGENT_DIED expected");
+ } else {
+ ok(ret == AGENT_DIED, "AGENT_DIED expected");
+ }
+
+ shutdown_agent(agent);
+
+ {
+ int child_target = target;
+ bool tret;
+ target = 0;
+ tret = test_db();
+ ok1(tret);
+ target = child_target;
+ }
+
+ target += 1;
+ }
+
+ return true;
+}
+
+int main(int argc, char *argv[])
+{
+ bool ret;
+ bool runtime_support;
+
+ runtime_support = tdb_runtime_check_for_robust_mutexes();
+
+ if (!runtime_support) {
+ skip(1, "No robust mutex support");
+ return exit_status();
+ }
+
+ plan_tests(12);
+ unlock_callback = maybe_die;
+
+ ret = test_one();
+ ok1(ret);
+
+ diag("done");
+ return exit_status();
+}
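
test_one() above performs an injection sweep: `target` selects which intercepted I/O call the external agent dies on, and each iteration re-creates the database, kills the agent at step N, verifies the database still checks out, then advances to N+1 until a run finally survives. The skeleton of that sweep, independent of tdb (run_workload and recover are stand-ins):

    #include <stdbool.h>
    #include <stdio.h>

    static int target, current;

    /* pretend workload: "dies" (returns false) at the injected step */
    static bool run_workload(void)
    {
            int step;

            current = 0;
            for (step = 0; step < 25; step++) {
                    current++;
                    if (current == target)
                            return false;   /* simulated crash here */
            }
            return true;
    }

    static bool recover(void) { return true; }  /* stand-in check */

    int main(void)
    {
            target = 1;
            while (!run_workload()) {
                    if (!recover())
                            return 1;
                    target++;       /* die one step later next time */
            }
            printf("survived with target=%d\n", target);
            return 0;
    }
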
diff --git a/test/run-mutex-openflags2.c b/test/run-mutex-openflags2.c
new file mode 100644
index 0000000..6522ae4
--- /dev/null
+++ b/test/run-mutex-openflags2.c
@@ -0,0 +1,153 @@
+#include "../common/tdb_private.h"
+#include "../common/io.c"
+#include "../common/tdb.c"
+#include "../common/lock.c"
+#include "../common/freelist.c"
+#include "../common/traverse.c"
+#include "../common/transaction.c"
+#include "../common/error.c"
+#include "../common/open.c"
+#include "../common/check.c"
+#include "../common/hash.c"
+#include "../common/mutex.c"
+#include "tap-interface.h"
+#include <stdlib.h>
+#include <sys/types.h>
+#include <sys/wait.h>
+#include <poll.h>
+#include <stdarg.h>
+
+static TDB_DATA key, data;
+
+static void log_void(struct tdb_context *tdb, enum tdb_debug_level level,
+ const char *fmt, ...)
+{
+}
+
+static void log_fn(struct tdb_context *tdb, enum tdb_debug_level level,
+ const char *fmt, ...)
+{
+ va_list ap;
+ va_start(ap, fmt);
+ vfprintf(stderr, fmt, ap);
+ va_end(ap);
+}
+
+static int do_child(int fd)
+{
+ struct tdb_context *tdb;
+ unsigned int log_count;
+ struct tdb_logging_context log_ctx = { log_fn, &log_count };
+ struct tdb_logging_context nolog_ctx = { log_void, NULL };
+ char c;
+
+ read(fd, &c, 1);
+
+ tdb = tdb_open_ex("mutex-openflags2.tdb", 0,
+ TDB_DEFAULT,
+ O_RDWR|O_CREAT, 0755, &nolog_ctx, NULL);
+ ok((tdb == NULL) && (errno == EINVAL), "TDB_DEFAULT without "
+ "TDB_MUTEX_LOCKING should fail with EINVAL - %d", errno);
+
+ tdb = tdb_open_ex("mutex-openflags2.tdb", 0,
+ TDB_CLEAR_IF_FIRST,
+ O_RDWR|O_CREAT, 0755, &nolog_ctx, NULL);
+ ok((tdb == NULL) && (errno == EINVAL), "TDB_CLEAR_IF_FIRST without "
+ "TDB_MUTEX_LOCKING should fail with EINVAL - %d", errno);
+
+ tdb = tdb_open_ex("mutex-openflags2.tdb", 0,
+ TDB_CLEAR_IF_FIRST |
+ TDB_MUTEX_LOCKING |
+ TDB_INTERNAL,
+ O_RDWR|O_CREAT, 0755, &nolog_ctx, NULL);
+ ok((tdb == NULL) && (errno == EINVAL), "TDB_MUTEX_LOCKING with "
+ "TDB_INTERNAL should fail with EINVAL - %d", errno);
+
+ tdb = tdb_open_ex("mutex-openflags2.tdb", 0,
+ TDB_CLEAR_IF_FIRST |
+ TDB_MUTEX_LOCKING |
+ TDB_NOMMAP,
+ O_RDWR|O_CREAT, 0755, &nolog_ctx, NULL);
+ ok((tdb == NULL) && (errno == EINVAL), "TDB_MUTEX_LOCKING with "
+ "TDB_NOMMAP should fail with EINVAL - %d", errno);
+
+ tdb = tdb_open_ex("mutex-openflags2.tdb", 0,
+ TDB_CLEAR_IF_FIRST |
+ TDB_MUTEX_LOCKING,
+ O_RDONLY, 0755, &nolog_ctx, NULL);
+ ok((tdb != NULL), "TDB_MUTEX_LOCKING with "
+ "O_RDONLY should work - %d", errno);
+ tdb_close(tdb);
+
+ tdb = tdb_open_ex("mutex-openflags2.tdb", 0,
+ TDB_CLEAR_IF_FIRST |
+ TDB_MUTEX_LOCKING,
+ O_RDWR|O_CREAT, 0755, &log_ctx, NULL);
+ ok((tdb != NULL), "TDB_MUTEX_LOCKING with TDB_CLEAR_IF_FIRST"
+ "TDB_NOMMAP should work - %d", errno);
+
+ return 0;
+}
+
+/* The code should barf on TDBs created with rwlocks. */
+int main(int argc, char *argv[])
+{
+ struct tdb_context *tdb;
+ unsigned int log_count;
+ struct tdb_logging_context log_ctx = { log_fn, &log_count };
+ struct tdb_logging_context nolog_ctx = { log_void, NULL };
+ int ret, status;
+ pid_t child, wait_ret;
+ int pipefd[2];
+ char c = 0;
+ bool runtime_support;
+
+ runtime_support = tdb_runtime_check_for_robust_mutexes();
+
+ ret = pipe(pipefd);
+ ok1(ret == 0);
+
+ key.dsize = strlen("hi");
+ key.dptr = discard_const_p(uint8_t, "hi");
+ data.dsize = strlen("world");
+ data.dptr = discard_const_p(uint8_t, "world");
+
+ tdb = tdb_open_ex("mutex-openflags2.tdb", 0,
+ TDB_INCOMPATIBLE_HASH|
+ TDB_MUTEX_LOCKING,
+ O_RDWR|O_CREAT, 0755, &nolog_ctx, NULL);
+ ok((tdb == NULL) && (errno == EINVAL), "TDB_MUTEX_LOCKING without "
+ "TDB_CLEAR_IF_FIRST should fail with EINVAL - %d", errno);
+
+ if (!runtime_support) {
+ tdb = tdb_open_ex("mutex-openflags2.tdb", 0,
+ TDB_CLEAR_IF_FIRST|
+ TDB_MUTEX_LOCKING,
+ O_RDWR|O_CREAT, 0755, &nolog_ctx, NULL);
+ ok((tdb == NULL) && (errno == ENOSYS), "TDB_MUTEX_LOCKING without "
+ "runtime support should fail with ENOSYS - %d", errno);
+
+ skip(1, "No robust mutex support");
+ return exit_status();
+ }
+
+ child = fork();
+ if (child == 0) {
+ return do_child(pipefd[0]);
+ }
+
+ tdb = tdb_open_ex("mutex-openflags2.tdb", 0,
+ TDB_CLEAR_IF_FIRST|
+ TDB_MUTEX_LOCKING,
+ O_RDWR|O_CREAT, 0755, &log_ctx, NULL);
+ ok((tdb != NULL), "tdb_open_ex with mutexes should succeed");
+
+ write(pipefd[1], &c, 1);
+
+ wait_ret = wait(&status);
+ ok((wait_ret == child) && (status == 0),
+ "child should have exited correctly");
+
+ diag("done");
+ return exit_status();
+}
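
The open-flag matrix above pins down the contract: TDB_MUTEX_LOCKING requires TDB_CLEAR_IF_FIRST and is rejected with EINVAL when combined with TDB_INTERNAL or TDB_NOMMAP (and with ENOSYS when robust mutexes are unavailable). A sketch of what such a validation gate can look like — the flag values and check_flags are ours for illustration; tdb does the real check inside tdb_open_ex:

    #include <errno.h>
    #include <stdbool.h>
    #include <stdio.h>

    /* toy flag values, illustration only */
    #define T_CLEAR_IF_FIRST 0x1
    #define T_INTERNAL       0x2
    #define T_NOMMAP         0x4
    #define T_MUTEX_LOCKING  0x8

    static bool check_flags(unsigned flags)
    {
            if (!(flags & T_MUTEX_LOCKING))
                    return true;
            if (!(flags & T_CLEAR_IF_FIRST) ||
                (flags & (T_INTERNAL | T_NOMMAP))) {
                    errno = EINVAL;     /* reject the combination */
                    return false;
            }
            return true;
    }

    int main(void)
    {
            printf("mutex alone: %s\n",
                   check_flags(T_MUTEX_LOCKING) ? "ok" : "EINVAL");
            printf("mutex+clear: %s\n",
                   check_flags(T_MUTEX_LOCKING | T_CLEAR_IF_FIRST)
                   ? "ok" : "EINVAL");
            return 0;
    }
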
diff --git a/test/run-mutex-transaction1.c b/test/run-mutex-transaction1.c
new file mode 100644
index 0000000..7b9f7b1
--- /dev/null
+++ b/test/run-mutex-transaction1.c
@@ -0,0 +1,236 @@
+#include "../common/tdb_private.h"
+#include "../common/io.c"
+#include "../common/tdb.c"
+#include "../common/lock.c"
+#include "../common/freelist.c"
+#include "../common/traverse.c"
+#include "../common/transaction.c"
+#include "../common/error.c"
+#include "../common/open.c"
+#include "../common/check.c"
+#include "../common/hash.c"
+#include "../common/mutex.c"
+#include "tap-interface.h"
+#include <stdlib.h>
+#include <sys/types.h>
+#include <sys/wait.h>
+#include <stdarg.h>
+
+static TDB_DATA key, data;
+
+static void log_fn(struct tdb_context *tdb, enum tdb_debug_level level,
+ const char *fmt, ...)
+{
+ va_list ap;
+ va_start(ap, fmt);
+ vfprintf(stderr, fmt, ap);
+ va_end(ap);
+}
+
+static int do_child(int tdb_flags, int to, int from)
+{
+ struct tdb_context *tdb;
+ unsigned int log_count;
+ struct tdb_logging_context log_ctx = { log_fn, &log_count };
+ int ret;
+ char c = 0;
+
+ tdb = tdb_open_ex("mutex-transaction1.tdb", 3, tdb_flags,
+ O_RDWR|O_CREAT, 0755, &log_ctx, NULL);
+ ok(tdb, "tdb_open_ex should succeed");
+
+ ret = tdb_transaction_start(tdb);
+ ok(ret == 0, "tdb_transaction_start should succeed");
+
+ ret = tdb_store(tdb, key, data, TDB_INSERT);
+ ok(ret == 0, "tdb_store(tdb, key, data, TDB_INSERT) should succeed");
+
+ write(to, &c, sizeof(c));
+ read(from, &c, sizeof(c));
+
+ ret = tdb_transaction_cancel(tdb);
+ ok(ret == 0, "tdb_transaction_cancel should succeed");
+
+ write(to, &c, sizeof(c));
+ read(from, &c, sizeof(c));
+
+ ret = tdb_transaction_start(tdb);
+ ok(ret == 0, "tdb_transaction_start should succeed");
+
+ ret = tdb_store(tdb, key, data, TDB_INSERT);
+ ok(ret == 0, "tdb_store(tdb, key, data, TDB_INSERT) should succeed");
+
+ write(to, &c, sizeof(c));
+ read(from, &c, sizeof(c));
+
+ ret = tdb_transaction_commit(tdb);
+ ok(ret == 0, "tdb_transaction_commit should succeed");
+
+ write(to, &c, sizeof(c));
+ read(from, &c, sizeof(c));
+
+ ret = tdb_transaction_start(tdb);
+ ok(ret == 0, "tdb_transaction_start should succeed");
+
+ ret = tdb_store(tdb, key, key, TDB_REPLACE);
+ ok(ret == 0, "tdb_store(tdb, key, data, TDB_REPLACE) should succeed");
+
+ write(to, &c, sizeof(c));
+ read(from, &c, sizeof(c));
+
+ ret = tdb_transaction_commit(tdb);
+ ok(ret == 0, "tdb_transaction_commit should succeed");
+
+ write(to, &c, sizeof(c));
+ read(from, &c, sizeof(c));
+
+ return 0;
+}
+
+/* The code should barf on TDBs created with rwlocks. */
+int main(int argc, char *argv[])
+{
+ struct tdb_context *tdb;
+ unsigned int log_count;
+ struct tdb_logging_context log_ctx = { log_fn, &log_count };
+ int ret, status;
+ pid_t child, wait_ret;
+ int fromchild[2];
+ int tochild[2];
+ TDB_DATA val;
+ char c;
+ int tdb_flags;
+ bool runtime_support;
+
+ runtime_support = tdb_runtime_check_for_robust_mutexes();
+
+ if (!runtime_support) {
+ skip(1, "No robust mutex support");
+ return exit_status();
+ }
+
+ key.dsize = strlen("hi");
+ key.dptr = discard_const_p(uint8_t, "hi");
+ data.dsize = strlen("world");
+ data.dptr = discard_const_p(uint8_t, "world");
+
+ pipe(fromchild);
+ pipe(tochild);
+
+ tdb_flags = TDB_INCOMPATIBLE_HASH|
+ TDB_MUTEX_LOCKING|
+ TDB_CLEAR_IF_FIRST;
+
+ child = fork();
+ if (child == 0) {
+ close(fromchild[0]);
+ close(tochild[1]);
+ return do_child(tdb_flags, fromchild[1], tochild[0]);
+ }
+ close(fromchild[1]);
+ close(tochild[0]);
+
+ read(fromchild[0], &c, sizeof(c));
+
+ tdb = tdb_open_ex("mutex-transaction1.tdb", 0,
+ tdb_flags, O_RDWR|O_CREAT, 0755,
+ &log_ctx, NULL);
+ ok(tdb, "tdb_open_ex should succeed");
+
+ /*
+ * The child has the transaction running
+ */
+ ret = tdb_transaction_start_nonblock(tdb);
+ ok(ret == -1, "tdb_transaction_start_nonblock not succeed");
+
+ ret = tdb_chainlock_nonblock(tdb, key);
+ ok(ret == -1, "tdb_chainlock_nonblock should not succeed");
+
+ /*
+ * We can still read
+ */
+ ret = tdb_exists(tdb, key);
+ ok(ret == 0, "tdb_exists(tdb, key) should return 0");
+
+ val = tdb_fetch(tdb, key);
+ ok(val.dsize == 0, "tdb_fetch(tdb, key) should return an empty value");
+
+ write(tochild[1], &c, sizeof(c));
+
+ /*
+ * When the child canceled we can start...
+ */
+ ret = tdb_transaction_start(tdb);
+ ok(ret == 0, "tdb_transaction_start should succeed");
+
+ read(fromchild[0], &c, sizeof(c));
+ write(tochild[1], &c, sizeof(c));
+
+ ret = tdb_transaction_cancel(tdb);
+ ok(ret == 0, "tdb_transaction_cancel should succeed");
+
+ /*
+ * When we canceled the child can start and store...
+ */
+ read(fromchild[0], &c, sizeof(c));
+
+ /*
+ * We still see the old values before the child commits...
+ */
+ ret = tdb_exists(tdb, key);
+ ok(ret == 0, "tdb_exists(tdb, key) should return 0");
+
+ val = tdb_fetch(tdb, key);
+ ok(val.dsize == 0, "tdb_fetch(tdb, key) should return an empty value");
+
+ write(tochild[1], &c, sizeof(c));
+ read(fromchild[0], &c, sizeof(c));
+
+ /*
+ * We see the new values after the commit...
+ */
+ ret = tdb_exists(tdb, key);
+ ok(ret == 1, "tdb_exists(tdb, key) should return 1");
+
+ val = tdb_fetch(tdb, key);
+ ok(val.dsize != 0, "tdb_fetch(tdb, key) should return a value");
+	ok(val.dsize == data.dsize, "tdb_fetch(tdb, key) should return a value of the stored size");
+	ok(memcmp(val.dptr, data.dptr, data.dsize) == 0, "tdb_fetch(tdb, key) should return the stored value");
+
+ write(tochild[1], &c, sizeof(c));
+ read(fromchild[0], &c, sizeof(c));
+
+ /*
+ * The child started a new transaction and replaces the value,
+ * but we still see the old values before the child commits...
+ */
+ ret = tdb_exists(tdb, key);
+ ok(ret == 1, "tdb_exists(tdb, key) should return 1");
+
+ val = tdb_fetch(tdb, key);
+ ok(val.dsize != 0, "tdb_fetch(tdb, key) should return a value");
+	ok(val.dsize == data.dsize, "tdb_fetch(tdb, key) should return a value of the stored size");
+	ok(memcmp(val.dptr, data.dptr, data.dsize) == 0, "tdb_fetch(tdb, key) should return the stored value");
+
+ write(tochild[1], &c, sizeof(c));
+ read(fromchild[0], &c, sizeof(c));
+
+ /*
+ * We see the new values after the commit...
+ */
+ ret = tdb_exists(tdb, key);
+ ok(ret == 1, "tdb_exists(tdb, key) should return 1");
+
+ val = tdb_fetch(tdb, key);
+ ok(val.dsize != 0, "tdb_fetch(tdb, key) should return a value");
+	ok(val.dsize == key.dsize, "tdb_fetch(tdb, key) should return a value of the replaced size");
+	ok(memcmp(val.dptr, key.dptr, key.dsize) == 0, "tdb_fetch(tdb, key) should return the replaced value");
+
+ write(tochild[1], &c, sizeof(c));
+
+ wait_ret = wait(&status);
+ ok(wait_ret == child, "child should have exited correctly");
+
+ diag("done");
+ return exit_status();
+}
diff --git a/test/run-mutex-trylock.c b/test/run-mutex-trylock.c
new file mode 100644
index 0000000..c96b635
--- /dev/null
+++ b/test/run-mutex-trylock.c
@@ -0,0 +1,122 @@
+#include "../common/tdb_private.h"
+#include "../common/io.c"
+#include "../common/tdb.c"
+#include "../common/lock.c"
+#include "../common/freelist.c"
+#include "../common/traverse.c"
+#include "../common/transaction.c"
+#include "../common/error.c"
+#include "../common/open.c"
+#include "../common/check.c"
+#include "../common/hash.c"
+#include "../common/mutex.c"
+#include "tap-interface.h"
+#include <stdlib.h>
+#include <sys/types.h>
+#include <sys/wait.h>
+#include <stdarg.h>
+
+static TDB_DATA key, data;
+
+static void log_fn(struct tdb_context *tdb, enum tdb_debug_level level,
+ const char *fmt, ...)
+{
+ va_list ap;
+ va_start(ap, fmt);
+ vfprintf(stderr, fmt, ap);
+ va_end(ap);
+}
+
+static int do_child(int tdb_flags, int to, int from)
+{
+ struct tdb_context *tdb;
+ unsigned int log_count;
+ struct tdb_logging_context log_ctx = { log_fn, &log_count };
+ int ret;
+ char c = 0;
+
+ tdb = tdb_open_ex("mutex-trylock.tdb", 0, tdb_flags,
+ O_RDWR|O_CREAT, 0755, &log_ctx, NULL);
+ ok(tdb, "tdb_open_ex should succeed");
+
+ ret = tdb_chainlock(tdb, key);
+ ok(ret == 0, "tdb_chainlock should succeed");
+
+ write(to, &c, sizeof(c));
+
+ read(from, &c, sizeof(c));
+
+ ret = tdb_chainunlock(tdb, key);
+ ok(ret == 0, "tdb_chainunlock should succeed");
+
+ write(to, &c, sizeof(c));
+
+ return 0;
+}
+
+/* The code should barf on TDBs created with rwlocks. */
+int main(int argc, char *argv[])
+{
+ struct tdb_context *tdb;
+ unsigned int log_count;
+ struct tdb_logging_context log_ctx = { log_fn, &log_count };
+ int ret, status;
+ pid_t child, wait_ret;
+ int fromchild[2];
+ int tochild[2];
+ char c;
+ int tdb_flags;
+ bool runtime_support;
+
+ runtime_support = tdb_runtime_check_for_robust_mutexes();
+
+ if (!runtime_support) {
+ skip(1, "No robust mutex support");
+ return exit_status();
+ }
+
+ key.dsize = strlen("hi");
+ key.dptr = discard_const_p(uint8_t, "hi");
+ data.dsize = strlen("world");
+ data.dptr = discard_const_p(uint8_t, "world");
+
+ pipe(fromchild);
+ pipe(tochild);
+
+ tdb_flags = TDB_INCOMPATIBLE_HASH|
+ TDB_MUTEX_LOCKING|
+ TDB_CLEAR_IF_FIRST;
+
+ child = fork();
+ if (child == 0) {
+ close(fromchild[0]);
+ close(tochild[1]);
+ return do_child(tdb_flags, fromchild[1], tochild[0]);
+ }
+ close(fromchild[1]);
+ close(tochild[0]);
+
+ read(fromchild[0], &c, sizeof(c));
+
+ tdb = tdb_open_ex("mutex-trylock.tdb", 0, tdb_flags,
+ O_RDWR|O_CREAT, 0755, &log_ctx, NULL);
+ ok(tdb, "tdb_open_ex should succeed");
+
+ ret = tdb_chainlock_nonblock(tdb, key);
+ ok(ret == -1, "tdb_chainlock_nonblock should not succeed");
+
+ write(tochild[1], &c, sizeof(c));
+
+ read(fromchild[0], &c, sizeof(c));
+
+ ret = tdb_chainlock_nonblock(tdb, key);
+ ok(ret == 0, "tdb_chainlock_nonblock should succeed");
+ ret = tdb_chainunlock(tdb, key);
+ ok(ret == 0, "tdb_chainunlock should succeed");
+
+ wait_ret = wait(&status);
+ ok(wait_ret == child, "child should have exited correctly");
+
+ diag("done");
+ return exit_status();
+}
diff --git a/test/run-mutex1.c b/test/run-mutex1.c
new file mode 100644
index 0000000..eb75946
--- /dev/null
+++ b/test/run-mutex1.c
@@ -0,0 +1,138 @@
+#include "../common/tdb_private.h"
+#include "../common/io.c"
+#include "../common/tdb.c"
+#include "../common/lock.c"
+#include "../common/freelist.c"
+#include "../common/traverse.c"
+#include "../common/transaction.c"
+#include "../common/error.c"
+#include "../common/open.c"
+#include "../common/check.c"
+#include "../common/hash.c"
+#include "../common/mutex.c"
+#include "tap-interface.h"
+#include <stdlib.h>
+#include <sys/types.h>
+#include <sys/wait.h>
+#include <stdarg.h>
+
+static TDB_DATA key, data;
+
+static void log_fn(struct tdb_context *tdb, enum tdb_debug_level level,
+ const char *fmt, ...)
+{
+ va_list ap;
+ va_start(ap, fmt);
+ vfprintf(stderr, fmt, ap);
+ va_end(ap);
+}
+
+static int do_child(int tdb_flags, int to, int from)
+{
+ struct tdb_context *tdb;
+ unsigned int log_count;
+ struct tdb_logging_context log_ctx = { log_fn, &log_count };
+ int ret;
+ char c = 0;
+
+ tdb = tdb_open_ex("mutex1.tdb", 0, tdb_flags,
+ O_RDWR|O_CREAT, 0755, &log_ctx, NULL);
+ ok(tdb, "tdb_open_ex should succeed");
+
+ ret = tdb_chainlock(tdb, key);
+ ok(ret == 0, "tdb_chainlock should succeed");
+
+ write(to, &c, sizeof(c));
+ read(from, &c, sizeof(c));
+
+ ret = tdb_chainunlock(tdb, key);
+ ok(ret == 0, "tdb_chainunlock should succeed");
+
+ write(to, &c, sizeof(c));
+ read(from, &c, sizeof(c));
+
+ ret = tdb_allrecord_lock(tdb, F_WRLCK, TDB_LOCK_WAIT, false);
+ ok(ret == 0, "tdb_allrecord_lock should succeed");
+
+ write(to, &c, sizeof(c));
+ read(from, &c, sizeof(c));
+
+ ret = tdb_allrecord_unlock(tdb, F_WRLCK, false);
+ ok(ret == 0, "tdb_allrecord_lock should succeed");
+
+ return 0;
+}
+
+/* The code should barf on TDBs created with rwlocks. */
+int main(int argc, char *argv[])
+{
+ struct tdb_context *tdb;
+ unsigned int log_count;
+ struct tdb_logging_context log_ctx = { log_fn, &log_count };
+ int ret, status;
+ pid_t child, wait_ret;
+ int fromchild[2];
+ int tochild[2];
+ char c;
+ int tdb_flags;
+ bool runtime_support;
+
+ runtime_support = tdb_runtime_check_for_robust_mutexes();
+
+ if (!runtime_support) {
+ skip(1, "No robust mutex support");
+ return exit_status();
+ }
+
+ key.dsize = strlen("hi");
+ key.dptr = discard_const_p(uint8_t, "hi");
+ data.dsize = strlen("world");
+ data.dptr = discard_const_p(uint8_t, "world");
+
+ pipe(fromchild);
+ pipe(tochild);
+
+ tdb_flags = TDB_INCOMPATIBLE_HASH|
+ TDB_MUTEX_LOCKING|
+ TDB_CLEAR_IF_FIRST;
+
+ child = fork();
+ if (child == 0) {
+ close(fromchild[0]);
+ close(tochild[1]);
+ return do_child(tdb_flags, fromchild[1], tochild[0]);
+ }
+ close(fromchild[1]);
+ close(tochild[0]);
+
+ read(fromchild[0], &c, sizeof(c));
+
+ tdb = tdb_open_ex("mutex1.tdb", 0, tdb_flags,
+ O_RDWR|O_CREAT, 0755, &log_ctx, NULL);
+ ok(tdb, "tdb_open_ex should succeed");
+
+ write(tochild[1], &c, sizeof(c));
+ read(fromchild[0], &c, sizeof(c));
+
+ ret = tdb_allrecord_lock(tdb, F_WRLCK, TDB_LOCK_WAIT, false);
+ ok(ret == 0, "tdb_allrecord_lock should succeed");
+
+ ret = tdb_store(tdb, key, data, 0);
+ ok(ret == 0, "tdb_store should succeed");
+
+ ret = tdb_allrecord_unlock(tdb, F_WRLCK, false);
+ ok(ret == 0, "tdb_allrecord_unlock should succeed");
+
+ write(tochild[1], &c, sizeof(c));
+ read(fromchild[0], &c, sizeof(c));
+ write(tochild[1], &c, sizeof(c));
+
+ ret = tdb_delete(tdb, key);
+ ok(ret == 0, "tdb_delete should succeed");
+
+ wait_ret = wait(&status);
+ ok(wait_ret == child, "child should have exited correctly");
+
+ diag("done");
+ return exit_status();
+}
diff --git a/test/run-nested-transactions.c b/test/run-nested-transactions.c
new file mode 100644
index 0000000..864adf2
--- /dev/null
+++ b/test/run-nested-transactions.c
@@ -0,0 +1,79 @@
+#include "../common/tdb_private.h"
+#include "../common/io.c"
+#include "../common/tdb.c"
+#include "../common/lock.c"
+#include "../common/freelist.c"
+#include "../common/traverse.c"
+#include "../common/transaction.c"
+#include "../common/error.c"
+#include "../common/open.c"
+#include "../common/check.c"
+#include "../common/hash.c"
+#include "../common/mutex.c"
+#include "tap-interface.h"
+#include <stdlib.h>
+#include <stdbool.h>
+#include "logging.h"
+
+int main(int argc, char *argv[])
+{
+ struct tdb_context *tdb;
+ TDB_DATA key, data;
+
+ plan_tests(27);
+ key.dsize = strlen("hi");
+ key.dptr = discard_const_p(uint8_t, "hi");
+
+ tdb = tdb_open_ex("run-nested-transactions.tdb",
+ 1024, TDB_CLEAR_IF_FIRST|TDB_DISALLOW_NESTING,
+ O_CREAT|O_TRUNC|O_RDWR, 0600, &taplogctx, NULL);
+ ok1(tdb);
+
+ /* Nesting disallowed. */
+ ok1(tdb_transaction_start(tdb) == 0);
+ data.dptr = discard_const_p(uint8_t, "world");
+ data.dsize = strlen("world");
+ ok1(tdb_store(tdb, key, data, TDB_INSERT) == 0);
+ data = tdb_fetch(tdb, key);
+ ok1(data.dsize == strlen("world"));
+ ok1(memcmp(data.dptr, "world", strlen("world")) == 0);
+ free(data.dptr);
+ ok1(tdb_transaction_start(tdb) != 0);
+ ok1(tdb_error(tdb) == TDB_ERR_NESTING);
+
+ data = tdb_fetch(tdb, key);
+ ok1(data.dsize == strlen("world"));
+ ok1(memcmp(data.dptr, "world", strlen("world")) == 0);
+ free(data.dptr);
+ ok1(tdb_transaction_commit(tdb) == 0);
+ data = tdb_fetch(tdb, key);
+ ok1(data.dsize == strlen("world"));
+ ok1(memcmp(data.dptr, "world", strlen("world")) == 0);
+ free(data.dptr);
+ tdb_close(tdb);
+
+ /* Nesting allowed by default */
+ tdb = tdb_open_ex("run-nested-transactions.tdb",
+ 1024, TDB_DEFAULT, O_RDWR, 0, &taplogctx, NULL);
+ ok1(tdb);
+
+ ok1(tdb_transaction_start(tdb) == 0);
+ ok1(tdb_transaction_start(tdb) == 0);
+ ok1(tdb_delete(tdb, key) == 0);
+ ok1(tdb_transaction_commit(tdb) == 0);
+ ok1(!tdb_exists(tdb, key));
+ ok1(tdb_transaction_cancel(tdb) == 0);
+ /* Surprise! Kills inner "committed" transaction. */
+ ok1(tdb_exists(tdb, key));
+
+ ok1(tdb_transaction_start(tdb) == 0);
+ ok1(tdb_transaction_start(tdb) == 0);
+ ok1(tdb_delete(tdb, key) == 0);
+ ok1(tdb_transaction_commit(tdb) == 0);
+ ok1(!tdb_exists(tdb, key));
+ ok1(tdb_transaction_commit(tdb) == 0);
+ ok1(!tdb_exists(tdb, key));
+ tdb_close(tdb);
+
+ return exit_status();
+}
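
The surprise documented above — cancelling the outer transaction also undoes an inner transaction that already "committed" — is visible through the public API as well. A hedged usage sketch against the installed <tdb.h> (link with -ltdb; the database name is ours):

    #include <fcntl.h>
    #include <stdio.h>
    #include <string.h>
    #include <tdb.h>

    int main(void)
    {
            struct tdb_context *tdb;
            TDB_DATA key, val;

            key.dptr = (unsigned char *)"hi";
            key.dsize = strlen("hi");
            val.dptr = (unsigned char *)"world";
            val.dsize = strlen("world");

            tdb = tdb_open("nesting-demo.tdb", 0, TDB_DEFAULT,
                           O_RDWR | O_CREAT | O_TRUNC, 0600);
            if (tdb == NULL)
                    return 1;

            tdb_transaction_start(tdb);     /* outer */
            tdb_transaction_start(tdb);     /* inner (nesting is default) */
            tdb_store(tdb, key, val, TDB_REPLACE);
            tdb_transaction_commit(tdb);    /* inner "commit" */
            tdb_transaction_cancel(tdb);    /* outer cancel wins... */

            /* ...so the store above is gone again: prints 0 */
            printf("exists after cancel: %d\n", tdb_exists(tdb, key));
            tdb_close(tdb);
            return 0;
    }
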
diff --git a/test/run-nested-traverse.c b/test/run-nested-traverse.c
new file mode 100644
index 0000000..22ee3e2
--- /dev/null
+++ b/test/run-nested-traverse.c
@@ -0,0 +1,88 @@
+#include "../common/tdb_private.h"
+#include "lock-tracking.h"
+#define fcntl fcntl_with_lockcheck
+#include "../common/io.c"
+#include "../common/tdb.c"
+#include "../common/lock.c"
+#include "../common/freelist.c"
+#include "../common/traverse.c"
+#include "../common/transaction.c"
+#include "../common/error.c"
+#include "../common/open.c"
+#include "../common/check.c"
+#include "../common/hash.c"
+#include "../common/mutex.c"
+#include "tap-interface.h"
+#undef fcntl
+#include <stdlib.h>
+#include <stdbool.h>
+#include "external-agent.h"
+#include "logging.h"
+
+static struct agent *agent;
+
+static bool correct_key(TDB_DATA key)
+{
+ return key.dsize == strlen("hi")
+ && memcmp(key.dptr, "hi", key.dsize) == 0;
+}
+
+static bool correct_data(TDB_DATA data)
+{
+ return data.dsize == strlen("world")
+ && memcmp(data.dptr, "world", data.dsize) == 0;
+}
+
+static int traverse2(struct tdb_context *tdb, TDB_DATA key, TDB_DATA data,
+ void *p)
+{
+ ok1(correct_key(key));
+ ok1(correct_data(data));
+ return 0;
+}
+
+static int traverse1(struct tdb_context *tdb, TDB_DATA key, TDB_DATA data,
+ void *p)
+{
+ ok1(correct_key(key));
+ ok1(correct_data(data));
+ ok1(external_agent_operation(agent, TRANSACTION_START, tdb_name(tdb))
+ == WOULD_HAVE_BLOCKED);
+ tdb_traverse(tdb, traverse2, NULL);
+
+ /* That should *not* release the transaction lock! */
+ ok1(external_agent_operation(agent, TRANSACTION_START, tdb_name(tdb))
+ == WOULD_HAVE_BLOCKED);
+ return 0;
+}
+
+int main(int argc, char *argv[])
+{
+ struct tdb_context *tdb;
+ TDB_DATA key, data;
+
+ plan_tests(17);
+ agent = prepare_external_agent();
+
+ tdb = tdb_open_ex("run-nested-traverse.tdb", 1024, TDB_CLEAR_IF_FIRST,
+ O_CREAT|O_TRUNC|O_RDWR, 0600, &taplogctx, NULL);
+ ok1(tdb);
+
+ ok1(external_agent_operation(agent, OPEN, tdb_name(tdb)) == SUCCESS);
+ ok1(external_agent_operation(agent, TRANSACTION_START, tdb_name(tdb))
+ == SUCCESS);
+ ok1(external_agent_operation(agent, TRANSACTION_COMMIT, tdb_name(tdb))
+ == SUCCESS);
+
+ key.dsize = strlen("hi");
+ key.dptr = discard_const_p(uint8_t, "hi");
+ data.dptr = discard_const_p(uint8_t, "world");
+ data.dsize = strlen("world");
+
+ ok1(tdb_store(tdb, key, data, TDB_INSERT) == 0);
+ tdb_traverse(tdb, traverse1, NULL);
+ tdb_traverse_read(tdb, traverse1, NULL);
+ tdb_close(tdb);
+
+ return exit_status();
+}
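
tdb_traverse() above drives a callback once per record, and returning non-zero from the callback stops the walk; the test exploits this to re-enter the library while the traverse lock is held. A minimal public-API usage sketch of the callback shape (hedged; the database name and the counting logic are ours):

    #include <fcntl.h>
    #include <stdio.h>
    #include <tdb.h>

    /* callback: count records; returning non-zero would abort the walk */
    static int count_cb(struct tdb_context *tdb, TDB_DATA key,
                        TDB_DATA data, void *priv)
    {
            (*(unsigned *)priv)++;
            return 0;
    }

    int main(void)
    {
            unsigned n = 0;
            struct tdb_context *tdb = tdb_open("traverse-demo.tdb", 0,
                                               TDB_DEFAULT,
                                               O_RDWR | O_CREAT, 0600);
            if (tdb == NULL)
                    return 1;
            tdb_traverse(tdb, count_cb, &n);
            printf("%u record(s)\n", n);
            tdb_close(tdb);
            return 0;
    }
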
diff --git a/test/run-no-lock-during-traverse.c b/test/run-no-lock-during-traverse.c
new file mode 100644
index 0000000..737a32f
--- /dev/null
+++ b/test/run-no-lock-during-traverse.c
@@ -0,0 +1,114 @@
+#include "../common/tdb_private.h"
+#include "lock-tracking.h"
+
+#define fcntl fcntl_with_lockcheck
+
+#include "../common/io.c"
+#include "../common/tdb.c"
+#include "../common/lock.c"
+#include "../common/freelist.c"
+#include "../common/traverse.c"
+#include "../common/transaction.c"
+#include "../common/error.c"
+#include "../common/open.c"
+#include "../common/check.c"
+#include "../common/hash.c"
+#include "../common/mutex.c"
+#include "tap-interface.h"
+#include <stdlib.h>
+#include "logging.h"
+
+#undef fcntl
+
+#define NUM_ENTRIES 10
+
+static bool prepare_entries(struct tdb_context *tdb)
+{
+ unsigned int i;
+ TDB_DATA key, data;
+
+ for (i = 0; i < NUM_ENTRIES; i++) {
+ key.dsize = sizeof(i);
+ key.dptr = (void *)&i;
+ data.dsize = strlen("world");
+ data.dptr = discard_const_p(uint8_t, "world");
+
+ if (tdb_store(tdb, key, data, 0) != 0)
+ return false;
+ }
+ return true;
+}
+
+static void delete_entries(struct tdb_context *tdb)
+{
+ unsigned int i;
+ TDB_DATA key;
+
+ for (i = 0; i < NUM_ENTRIES; i++) {
+ key.dsize = sizeof(i);
+ key.dptr = (void *)&i;
+
+ ok1(tdb_delete(tdb, key) == 0);
+ }
+}
+
+/* We don't know how many times this will run. */
+static int delete_other(struct tdb_context *tdb, TDB_DATA key, TDB_DATA data,
+ void *private_data)
+{
+ unsigned int i;
+ memcpy(&i, key.dptr, 4);
+ i = (i + 1) % NUM_ENTRIES;
+ key.dptr = (void *)&i;
+ if (tdb_delete(tdb, key) != 0)
+ (*(int *)private_data)++;
+ return 0;
+}
+
+static int delete_self(struct tdb_context *tdb, TDB_DATA key, TDB_DATA data,
+ void *private_data)
+{
+ ok1(tdb_delete(tdb, key) == 0);
+ return 0;
+}
+
+int main(int argc, char *argv[])
+{
+ struct tdb_context *tdb;
+ int errors = 0;
+
+ plan_tests(41);
+ tdb = tdb_open_ex("run-no-lock-during-traverse.tdb",
+ 1024, TDB_CLEAR_IF_FIRST, O_CREAT|O_TRUNC|O_RDWR,
+ 0600, &taplogctx, NULL);
+
+ ok1(tdb);
+ ok1(prepare_entries(tdb));
+ ok1(locking_errors == 0);
+ ok1(tdb_lockall(tdb) == 0);
+ ok1(locking_errors == 0);
+ tdb_traverse(tdb, delete_other, &errors);
+ ok1(errors == 0);
+ ok1(locking_errors == 0);
+ ok1(tdb_unlockall(tdb) == 0);
+
+ ok1(prepare_entries(tdb));
+ ok1(locking_errors == 0);
+ ok1(tdb_lockall(tdb) == 0);
+ ok1(locking_errors == 0);
+ tdb_traverse(tdb, delete_self, NULL);
+ ok1(locking_errors == 0);
+ ok1(tdb_unlockall(tdb) == 0);
+
+ ok1(prepare_entries(tdb));
+ ok1(locking_errors == 0);
+ ok1(tdb_lockall(tdb) == 0);
+ ok1(locking_errors == 0);
+ delete_entries(tdb);
+ ok1(locking_errors == 0);
+ ok1(tdb_unlockall(tdb) == 0);
+
+ ok1(tdb_close(tdb) == 0);
+
+ return exit_status();
+}
diff --git a/test/run-oldhash.c b/test/run-oldhash.c
new file mode 100644
index 0000000..aaee6f6
--- /dev/null
+++ b/test/run-oldhash.c
@@ -0,0 +1,50 @@
+#include "../common/tdb_private.h"
+#include "../common/io.c"
+#include "../common/tdb.c"
+#include "../common/lock.c"
+#include "../common/freelist.c"
+#include "../common/traverse.c"
+#include "../common/transaction.c"
+#include "../common/error.c"
+#include "../common/open.c"
+#include "../common/check.c"
+#include "../common/hash.c"
+#include "../common/mutex.c"
+#include "tap-interface.h"
+#include <stdlib.h>
+#include "logging.h"
+
+int main(int argc, char *argv[])
+{
+ struct tdb_context *tdb;
+
+ plan_tests(8);
+
+ /* Old format (with zeroes in the hash magic fields) should
+ * open with any hash (since we don't know what hash they used). */
+ tdb = tdb_open_ex("test/old-nohash-le.tdb", 0, 0, O_RDWR, 0,
+ &taplogctx, NULL);
+ ok1(tdb);
+ ok1(tdb_check(tdb, NULL, NULL) == 0);
+ tdb_close(tdb);
+
+ tdb = tdb_open_ex("test/old-nohash-be.tdb", 0, 0, O_RDWR, 0,
+ &taplogctx, NULL);
+ ok1(tdb);
+ ok1(tdb_check(tdb, NULL, NULL) == 0);
+ tdb_close(tdb);
+
+ tdb = tdb_open_ex("test/old-nohash-le.tdb", 0, 0, O_RDWR, 0,
+ &taplogctx, tdb_jenkins_hash);
+ ok1(tdb);
+ ok1(tdb_check(tdb, NULL, NULL) == 0);
+ tdb_close(tdb);
+
+ tdb = tdb_open_ex("test/old-nohash-be.tdb", 0, 0, O_RDWR, 0,
+ &taplogctx, tdb_jenkins_hash);
+ ok1(tdb);
+ ok1(tdb_check(tdb, NULL, NULL) == 0);
+ tdb_close(tdb);
+
+ return exit_status();
+}
diff --git a/test/run-open-during-transaction.c b/test/run-open-during-transaction.c
new file mode 100644
index 0000000..9a6c6c1
--- /dev/null
+++ b/test/run-open-during-transaction.c
@@ -0,0 +1,183 @@
+#include "../common/tdb_private.h"
+#include "lock-tracking.h"
+
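+/* Intercept the file-modification paths: before each write during the
+ * transaction, verify that an external process can open the database
+ * and sees it unchanged. */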
+static ssize_t pwrite_check(int fd, const void *buf, size_t count, off_t offset);
+static ssize_t write_check(int fd, const void *buf, size_t count);
+static int ftruncate_check(int fd, off_t length);
+
+#define pwrite pwrite_check
+#define write write_check
+#define fcntl fcntl_with_lockcheck
+#define ftruncate ftruncate_check
+
+#include "../common/io.c"
+#include "../common/tdb.c"
+#include "../common/lock.c"
+#include "../common/freelist.c"
+#include "../common/traverse.c"
+#include "../common/transaction.c"
+#include "../common/error.c"
+#include "../common/open.c"
+#include "../common/check.c"
+#include "../common/hash.c"
+#include "../common/mutex.c"
+#include "tap-interface.h"
+#include <stdlib.h>
+#include <stdbool.h>
+#include <stdarg.h>
+#include "external-agent.h"
+#include "logging.h"
+
+static struct agent *agent;
+static bool opened;
+static int errors = 0;
+static bool clear_if_first;
+#define TEST_DBNAME "run-open-during-transaction.tdb"
+
+#undef write
+#undef pwrite
+#undef fcntl
+#undef ftruncate
+
+static bool is_same(const char *snapshot, const char *latest, off_t len)
+{
+ unsigned i;
+
+ for (i = 0; i < len; i++) {
+ if (snapshot[i] != latest[i])
+ return false;
+ }
+ return true;
+}
+
+static bool compare_file(int fd, const char *snapshot, off_t snapshot_len)
+{
+ char *contents;
+ bool same;
+
+ /* over-length read serves as length check. */
+ contents = malloc(snapshot_len+1);
+ same = pread(fd, contents, snapshot_len+1, 0) == snapshot_len
+ && is_same(snapshot, contents, snapshot_len);
+ free(contents);
+ return same;
+}
+
+static void check_file_intact(int fd)
+{
+ enum agent_return ret;
+ struct stat st;
+ char *contents;
+
+ fstat(fd, &st);
+ contents = malloc(st.st_size);
+ if (pread(fd, contents, st.st_size, 0) != st.st_size) {
+ diag("Read fail");
+ errors++;
+ free(contents);
+ return;
+ }
+
+ /* Ask agent to open file. */
+ ret = external_agent_operation(agent, clear_if_first ?
+ OPEN_WITH_CLEAR_IF_FIRST :
+ OPEN,
+ TEST_DBNAME);
+
+ /* It's OK to open it, but it must not have changed! */
+ if (!compare_file(fd, contents, st.st_size)) {
+ diag("Agent changed file after opening %s",
+ agent_return_name(ret));
+ errors++;
+ }
+
+ if (ret == SUCCESS) {
+ ret = external_agent_operation(agent, CLOSE, NULL);
+ if (ret != SUCCESS) {
+ diag("Agent failed to close tdb: %s",
+ agent_return_name(ret));
+ errors++;
+ }
+ } else if (ret != WOULD_HAVE_BLOCKED) {
+ diag("Agent opening file gave %s",
+ agent_return_name(ret));
+ errors++;
+ }
+
+ free(contents);
+}
+
+static void after_unlock(int fd)
+{
+ if (opened)
+ check_file_intact(fd);
+}
+
+static ssize_t pwrite_check(int fd,
+ const void *buf, size_t count, off_t offset)
+{
+ if (opened)
+ check_file_intact(fd);
+
+ return pwrite(fd, buf, count, offset);
+}
+
+static ssize_t write_check(int fd, const void *buf, size_t count)
+{
+ if (opened)
+ check_file_intact(fd);
+
+ return write(fd, buf, count);
+}
+
+static int ftruncate_check(int fd, off_t length)
+{
+ if (opened)
+ check_file_intact(fd);
+
+	return ftruncate(fd, length);
+}
+
+int main(int argc, char *argv[])
+{
+ const int flags[] = { TDB_DEFAULT,
+ TDB_CLEAR_IF_FIRST,
+ TDB_NOMMAP,
+ TDB_CLEAR_IF_FIRST | TDB_NOMMAP };
+ int i;
+ struct tdb_context *tdb;
+ TDB_DATA key, data;
+
+ plan_tests(20);
+ agent = prepare_external_agent();
+
+ unlock_callback = after_unlock;
+ for (i = 0; i < sizeof(flags)/sizeof(flags[0]); i++) {
+ clear_if_first = (flags[i] & TDB_CLEAR_IF_FIRST);
+ diag("Test with %s and %s",
+ clear_if_first ? "CLEAR" : "DEFAULT",
+ (flags[i] & TDB_NOMMAP) ? "no mmap" : "mmap");
+ unlink(TEST_DBNAME);
+ tdb = tdb_open_ex(TEST_DBNAME, 1024, flags[i],
+ O_CREAT|O_TRUNC|O_RDWR, 0600,
+ &taplogctx, NULL);
+ ok1(tdb);
+
+ opened = true;
+ ok1(tdb_transaction_start(tdb) == 0);
+ key.dsize = strlen("hi");
+ key.dptr = discard_const_p(uint8_t, "hi");
+ data.dptr = discard_const_p(uint8_t, "world");
+ data.dsize = strlen("world");
+
+ ok1(tdb_store(tdb, key, data, TDB_INSERT) == 0);
+ ok1(tdb_transaction_commit(tdb) == 0);
+		ok(!errors, "We had %d open errors", errors);
+
+ opened = false;
+ tdb_close(tdb);
+ }
+
+ return exit_status();
+}
diff --git a/test/run-readonly-check.c b/test/run-readonly-check.c
new file mode 100644
index 0000000..c5e0f7d
--- /dev/null
+++ b/test/run-readonly-check.c
@@ -0,0 +1,53 @@
+/* We should be able to tdb_check() an O_RDONLY tdb, and we're also allowed
+ * to tdb_check() inside a transaction (though that's paranoia!). */
+#include "../common/tdb_private.h"
+#include "../common/io.c"
+#include "../common/tdb.c"
+#include "../common/lock.c"
+#include "../common/freelist.c"
+#include "../common/traverse.c"
+#include "../common/transaction.c"
+#include "../common/error.c"
+#include "../common/open.c"
+#include "../common/check.c"
+#include "../common/hash.c"
+#include "../common/mutex.c"
+#include "tap-interface.h"
+#include <stdlib.h>
+#include "logging.h"
+
+int main(int argc, char *argv[])
+{
+ struct tdb_context *tdb;
+ TDB_DATA key, data;
+
+ plan_tests(11);
+ tdb = tdb_open_ex("run-readonly-check.tdb", 1024,
+ TDB_DEFAULT,
+ O_CREAT|O_TRUNC|O_RDWR, 0600, &taplogctx, NULL);
+
+ ok1(tdb);
+ key.dsize = strlen("hi");
+ key.dptr = discard_const_p(uint8_t, "hi");
+ data.dsize = strlen("world");
+ data.dptr = discard_const_p(uint8_t, "world");
+
+ ok1(tdb_store(tdb, key, data, TDB_INSERT) == 0);
+ ok1(tdb_check(tdb, NULL, NULL) == 0);
+
+ /* We are also allowed to do a check inside a transaction. */
+ ok1(tdb_transaction_start(tdb) == 0);
+ ok1(tdb_check(tdb, NULL, NULL) == 0);
+ ok1(tdb_close(tdb) == 0);
+
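+	/* Reopen read-only: stores must now fail with TDB_ERR_RDONLY, but
+	 * tdb_check() must still succeed. */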
+ tdb = tdb_open_ex("run-readonly-check.tdb", 1024,
+ TDB_DEFAULT, O_RDONLY, 0, &taplogctx, NULL);
+
+ ok1(tdb);
+ ok1(tdb_store(tdb, key, data, TDB_MODIFY) == -1);
+ ok1(tdb_error(tdb) == TDB_ERR_RDONLY);
+ ok1(tdb_check(tdb, NULL, NULL) == 0);
+ ok1(tdb_close(tdb) == 0);
+
+ return exit_status();
+}
diff --git a/test/run-rescue-find_entry.c b/test/run-rescue-find_entry.c
new file mode 100644
index 0000000..5d6f8f7
--- /dev/null
+++ b/test/run-rescue-find_entry.c
@@ -0,0 +1,51 @@
+#include "../common/tdb_private.h"
+#include "../common/io.c"
+#include "../common/tdb.c"
+#include "../common/lock.c"
+#include "../common/freelist.c"
+#include "../common/traverse.c"
+#include "../common/transaction.c"
+#include "../common/error.c"
+#include "../common/open.c"
+#include "../common/check.c"
+#include "../common/hash.c"
+#include "../common/rescue.c"
+#include "../common/mutex.c"
+#include "tap-interface.h"
+#include <stdlib.h>
+#include "logging.h"
+
+#define NUM 20
+
+/* Binary searches are deceptively simple: easy to screw up! */
+int main(int argc, char *argv[])
+{
+ unsigned int i, j, n;
+ struct found f[NUM+1];
+ struct found_table table;
+
+ /* Set up array for searching. */
+ for (i = 0; i < NUM+1; i++) {
+ f[i].head = i * 3;
+ }
+ table.arr = f;
+
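+	/* Heads are 0, 3, 6, ...: values of j between them exercise the
+	 * boundary cases of the binary search. */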
+ for (i = 0; i < NUM; i++) {
+ table.num = i;
+ for (j = 0; j < (i + 2) * 3; j++) {
+ n = find_entry(&table, j);
+ ok1(n <= i);
+
+ /* If we were searching for something too large... */
+ if (j > i*3)
+ ok1(n == i);
+ else {
+				/* It must give us the first entry at or after j. */
+ ok1(f[n].head >= j);
+ ok1(n == 0 || f[n-1].head < j);
+ }
+ }
+ }
+
+ return exit_status();
+}
diff --git a/test/run-rescue.c b/test/run-rescue.c
new file mode 100644
index 0000000..e43f53b
--- /dev/null
+++ b/test/run-rescue.c
@@ -0,0 +1,127 @@
+#include "../common/tdb_private.h"
+#include "../common/io.c"
+#include "../common/tdb.c"
+#include "../common/lock.c"
+#include "../common/freelist.c"
+#include "../common/traverse.c"
+#include "../common/transaction.c"
+#include "../common/error.c"
+#include "../common/open.c"
+#include "../common/check.c"
+#include "../common/hash.c"
+#include "../common/rescue.c"
+#include "../common/mutex.c"
+#include "tap-interface.h"
+#include <stdlib.h>
+#include "logging.h"
+
+struct walk_data {
+ TDB_DATA key;
+ TDB_DATA data;
+ bool fail;
+ unsigned count;
+};
+
+static inline bool tdb_deq(TDB_DATA a, TDB_DATA b)
+{
+ return a.dsize == b.dsize && memcmp(a.dptr, b.dptr, a.dsize) == 0;
+}
+
+static inline TDB_DATA tdb_mkdata(const void *p, size_t len)
+{
+ TDB_DATA d;
+ d.dptr = discard_const_p(uint8_t, p);
+ d.dsize = len;
+ return d;
+}
+
+static void walk(TDB_DATA key, TDB_DATA data, void *_wd)
+{
+ struct walk_data *wd = _wd;
+
+ if (!tdb_deq(key, wd->key)) {
+ wd->fail = true;
+ }
+
+ if (!tdb_deq(data, wd->data)) {
+ wd->fail = true;
+ }
+ wd->count++;
+}
+
+static void count_records(TDB_DATA key, TDB_DATA data, void *_wd)
+{
+ struct walk_data *wd = _wd;
+
+ if (!tdb_deq(key, wd->key) || !tdb_deq(data, wd->data))
+ diag("%.*s::%.*s",
+ (int)key.dsize, key.dptr, (int)data.dsize, data.dptr);
+ wd->count++;
+}
+
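+/* Count log messages: rescuing the intact database must stay silent. */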
+static void log_fn(struct tdb_context *tdb, enum tdb_debug_level level, const char *fmt, ...)
+{
+ unsigned int *count = tdb_get_logging_private(tdb);
+ (*count)++;
+}
+
+int main(int argc, char *argv[])
+{
+ struct tdb_context *tdb;
+ struct walk_data wd;
+ unsigned int i, size, log_count = 0;
+ struct tdb_logging_context log_ctx = { log_fn, &log_count };
+
+ plan_tests(8);
+ tdb = tdb_open_ex("run-rescue.tdb", 1, TDB_CLEAR_IF_FIRST,
+ O_CREAT|O_TRUNC|O_RDWR, 0600, &log_ctx, NULL);
+
+ wd.key.dsize = strlen("hi");
+ wd.key.dptr = discard_const_p(uint8_t, "hi");
+ wd.data.dsize = strlen("world");
+ wd.data.dptr = discard_const_p(uint8_t, "world");
+ wd.count = 0;
+ wd.fail = false;
+
+ ok1(tdb_store(tdb, wd.key, wd.data, TDB_INSERT) == 0);
+
+ ok1(tdb_rescue(tdb, walk, &wd) == 0);
+ ok1(!wd.fail);
+ ok1(wd.count == 1);
+
+	/* Corrupt the database one byte at a time; the rescue walk must cope,
+	 * finding either the record or nothing, but never extra records. */
+ size = tdb->map_size;
+ for (i = sizeof(struct tdb_header); i < size; i++) {
+ char c;
+ if (tdb->methods->tdb_read(tdb, i, &c, 1, false) != 0)
+ fail("Reading offset %i", i);
+ if (tdb->methods->tdb_write(tdb, i, "X", 1) != 0)
+ fail("Writing X at offset %i", i);
+
+ wd.count = 0;
+ if (tdb_rescue(tdb, count_records, &wd) != 0) {
+ wd.fail = true;
+ break;
+ }
+ /* Could be 0 or 1. */
+ if (wd.count > 1) {
+ wd.fail = true;
+ break;
+ }
+ if (tdb->methods->tdb_write(tdb, i, &c, 1) != 0)
+ fail("Restoring offset %i", i);
+ }
+ ok1(log_count == 0);
+ ok1(!wd.fail);
+ tdb_close(tdb);
+
+ /* Now try our known-corrupt db. */
+ tdb = tdb_open_ex("test/tdb.corrupt", 1024, 0, O_RDWR, 0,
+ &taplogctx, NULL);
+ wd.count = 0;
+ ok1(tdb_rescue(tdb, count_records, &wd) == 0);
+ ok1(wd.count == 1627);
+ tdb_close(tdb);
+
+ return exit_status();
+}
diff --git a/test/run-rwlock-check.c b/test/run-rwlock-check.c
new file mode 100644
index 0000000..2ac9dc3
--- /dev/null
+++ b/test/run-rwlock-check.c
@@ -0,0 +1,46 @@
+#include "../common/tdb_private.h"
+#include "../common/io.c"
+#include "../common/tdb.c"
+#include "../common/lock.c"
+#include "../common/freelist.c"
+#include "../common/traverse.c"
+#include "../common/transaction.c"
+#include "../common/error.c"
+#include "../common/open.c"
+#include "../common/check.c"
+#include "../common/hash.c"
+#include "../common/mutex.c"
+#include "tap-interface.h"
+#include <stdlib.h>
+
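+/* Count only the log messages complaining about spinlocks. */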
+static void log_fn(struct tdb_context *tdb, enum tdb_debug_level level, const char *fmt, ...)
+{
+ unsigned int *count = tdb_get_logging_private(tdb);
+ if (strstr(fmt, "spinlocks"))
+ (*count)++;
+}
+
+/* The code should barf on TDBs created with rwlocks. */
+int main(int argc, char *argv[])
+{
+ struct tdb_context *tdb;
+ unsigned int log_count;
+ struct tdb_logging_context log_ctx = { log_fn, &log_count };
+
+ plan_tests(4);
+
+ /* We should fail to open rwlock-using tdbs of either endian. */
+ log_count = 0;
+ tdb = tdb_open_ex("test/rwlock-le.tdb", 0, 0, O_RDWR, 0,
+ &log_ctx, NULL);
+ ok1(!tdb);
+ ok1(log_count == 1);
+
+ log_count = 0;
+ tdb = tdb_open_ex("test/rwlock-be.tdb", 0, 0, O_RDWR, 0,
+ &log_ctx, NULL);
+ ok1(!tdb);
+ ok1(log_count == 1);
+
+ return exit_status();
+}
diff --git a/test/run-summary.c b/test/run-summary.c
new file mode 100644
index 0000000..8b9a1a0
--- /dev/null
+++ b/test/run-summary.c
@@ -0,0 +1,65 @@
+#include "../common/tdb_private.h"
+#include "../common/io.c"
+#include "../common/tdb.c"
+#include "../common/lock.c"
+#include "../common/freelist.c"
+#include "../common/traverse.c"
+#include "../common/transaction.c"
+#include "../common/error.c"
+#include "../common/open.c"
+#include "../common/check.c"
+#include "../common/hash.c"
+#include "../common/summary.c"
+#include "../common/mutex.c"
+#include "tap-interface.h"
+#include <stdlib.h>
+
+int main(int argc, char *argv[])
+{
+ unsigned int i, j;
+ struct tdb_context *tdb;
+ int flags[] = { TDB_INTERNAL, TDB_DEFAULT, TDB_NOMMAP,
+ TDB_INTERNAL|TDB_CONVERT, TDB_CONVERT,
+ TDB_NOMMAP|TDB_CONVERT };
+ TDB_DATA key = { (unsigned char *)&j, sizeof(j) };
+ TDB_DATA data = { (unsigned char *)&j, sizeof(j) };
+ char *summary;
+
+ plan_tests(sizeof(flags) / sizeof(flags[0]) * 14);
+ for (i = 0; i < sizeof(flags) / sizeof(flags[0]); i++) {
+ tdb = tdb_open("run-summary.tdb", 131, flags[i],
+ O_RDWR|O_CREAT|O_TRUNC, 0600);
+ ok1(tdb);
+ if (!tdb)
+ continue;
+
+ /* Put some stuff in there. */
+ for (j = 0; j < 500; j++) {
+			/* Make sure padding varies so we get some graphs! */
+ data.dsize = j % (sizeof(j) + 1);
+ if (tdb_store(tdb, key, data, TDB_REPLACE) != 0)
+ fail("Storing in tdb");
+ }
+
+ summary = tdb_summary(tdb);
+ diag("%s", summary);
+ ok1(strstr(summary, "Size of file/data: "));
+ ok1(strstr(summary, "Number of records: 500\n"));
+ ok1(strstr(summary, "Smallest/average/largest keys: 4/4/4\n"));
+ ok1(strstr(summary, "Smallest/average/largest data: 0/2/4\n"));
+ ok1(strstr(summary, "Smallest/average/largest padding: "));
+ ok1(strstr(summary, "Number of dead records: 0\n"));
+ ok1(strstr(summary, "Number of free records: 1\n"));
+ ok1(strstr(summary, "Smallest/average/largest free records: "));
+ ok1(strstr(summary, "Number of hash chains: 131\n"));
+ ok1(strstr(summary, "Smallest/average/largest hash chains: "));
+ ok1(strstr(summary, "Number of uncoalesced records: 0\n"));
+ ok1(strstr(summary, "Smallest/average/largest uncoalesced runs: 0/0/0\n"));
+ ok1(strstr(summary, "Percentage keys/data/padding/free/dead/rechdrs&tailers/hashes: "));
+
+ free(summary);
+ tdb_close(tdb);
+ }
+
+ return exit_status();
+}
diff --git a/test/run-transaction-expand.c b/test/run-transaction-expand.c
new file mode 100644
index 0000000..d36b894
--- /dev/null
+++ b/test/run-transaction-expand.c
@@ -0,0 +1,125 @@
+#include "../common/tdb_private.h"
+
+/* Speed up the tests: fake the sync calls, but count them so we can still
+ * test that syncing happens. */
+static unsigned int sync_counts = 0;
+static inline int fake_fsync(int fd)
+{
+ sync_counts++;
+ return 0;
+}
+#define fsync fake_fsync
+
+#ifdef MS_SYNC
+static inline int fake_msync(void *addr, size_t length, int flags)
+{
+ sync_counts++;
+ return 0;
+}
+#define msync fake_msync
+#endif
+
+#ifdef HAVE_FDATASYNC
+static inline int fake_fdatasync(int fd)
+{
+ sync_counts++;
+ return 0;
+}
+#define fdatasync fake_fdatasync
+#endif
+
+#include "../common/io.c"
+#include "../common/tdb.c"
+#include "../common/lock.c"
+#include "../common/freelist.c"
+#include "../common/traverse.c"
+#include "../common/transaction.c"
+#include "../common/error.c"
+#include "../common/open.c"
+#include "../common/check.c"
+#include "../common/hash.c"
+#include "../common/mutex.c"
+#include "tap-interface.h"
+#include <stdlib.h>
+#include "logging.h"
+
+static void write_record(struct tdb_context *tdb, size_t extra_len,
+ TDB_DATA *data)
+{
+ TDB_DATA key;
+ key.dsize = strlen("hi");
+ key.dptr = discard_const_p(uint8_t, "hi");
+
+ data->dsize += extra_len;
+ tdb_transaction_start(tdb);
+ tdb_store(tdb, key, *data, TDB_REPLACE);
+ tdb_transaction_commit(tdb);
+}
+
+int main(int argc, char *argv[])
+{
+ struct tdb_context *tdb;
+ size_t i;
+ TDB_DATA data;
+ struct tdb_record rec;
+ tdb_off_t off;
+
+ /* Do *not* suppress sync for this test; we do it ourselves. */
+ unsetenv("TDB_NO_FSYNC");
+
+ plan_tests(5);
+ tdb = tdb_open_ex("run-transaction-expand.tdb",
+ 1024, TDB_CLEAR_IF_FIRST,
+ O_CREAT|O_TRUNC|O_RDWR, 0600, &taplogctx, NULL);
+ ok1(tdb);
+
+ data.dsize = 0;
+ data.dptr = calloc(1000, getpagesize());
+ if (data.dptr == NULL) {
+ diag("Unable to allocate memory for data.dptr");
+ tdb_close(tdb);
+ exit(1);
+ }
+
+ /* Simulate a slowly growing record. */
+ for (i = 0; i < 1000; i++)
+ write_record(tdb, getpagesize(), &data);
+
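+	/* Locate the recovery area to report how much the file grew. */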
+ tdb_ofs_read(tdb, TDB_RECOVERY_HEAD, &off);
+ tdb_read(tdb, off, &rec, sizeof(rec), DOCONV());
+ diag("TDB size = %zu, recovery = %llu-%llu",
+ (size_t)tdb->map_size, (unsigned long long)off, (unsigned long long)(off + sizeof(rec) + rec.rec_len));
+
+	/* We should only be about 5 times larger than the largest record. */
+ ok1(tdb->map_size < 6 * i * getpagesize());
+ tdb_close(tdb);
+
+ tdb = tdb_open_ex("run-transaction-expand.tdb",
+ 1024, TDB_CLEAR_IF_FIRST,
+ O_CREAT|O_TRUNC|O_RDWR, 0600, &taplogctx, NULL);
+ ok1(tdb);
+
+ data.dsize = 0;
+
+	/* Simulate a slowly growing record, repacking to keep the
+	 * recovery area at the end. */
+ for (i = 0; i < 1000; i++) {
+ write_record(tdb, getpagesize(), &data);
+ if (i % 10 == 0)
+ tdb_repack(tdb);
+ }
+
+ tdb_ofs_read(tdb, TDB_RECOVERY_HEAD, &off);
+ tdb_read(tdb, off, &rec, sizeof(rec), DOCONV());
+ diag("TDB size = %zu, recovery = %llu-%llu",
+ (size_t)tdb->map_size, (unsigned long long)off, (unsigned long long)(off + sizeof(rec) + rec.rec_len));
+
+	/* We should only be about 4 times larger than the largest record. */
+ ok1(tdb->map_size < 5 * i * getpagesize());
+
+ /* We should have synchronized multiple times. */
+ ok1(sync_counts);
+ tdb_close(tdb);
+ free(data.dptr);
+
+ return exit_status();
+}
diff --git a/test/run-traverse-in-transaction.c b/test/run-traverse-in-transaction.c
new file mode 100644
index 0000000..17d6412
--- /dev/null
+++ b/test/run-traverse-in-transaction.c
@@ -0,0 +1,87 @@
+#include "lock-tracking.h"
+#include "../common/tdb_private.h"
+#define fcntl fcntl_with_lockcheck
+#include "../common/io.c"
+#include "../common/tdb.c"
+#include "../common/lock.c"
+#include "../common/freelist.c"
+#include "../common/traverse.c"
+#include "../common/transaction.c"
+#include "../common/error.c"
+#include "../common/open.c"
+#include "../common/check.c"
+#include "../common/hash.c"
+#include "../common/mutex.c"
+#include "tap-interface.h"
+#undef fcntl
+#include <stdlib.h>
+#include <stdbool.h>
+#include "external-agent.h"
+#include "logging.h"
+
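+/* The external agent is a separate process, so lock visibility is tested
+ * across real fcntl locks rather than within a single process. */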
+static struct agent *agent;
+
+static bool correct_key(TDB_DATA key)
+{
+ return key.dsize == strlen("hi")
+ && memcmp(key.dptr, "hi", key.dsize) == 0;
+}
+
+static bool correct_data(TDB_DATA data)
+{
+ return data.dsize == strlen("world")
+ && memcmp(data.dptr, "world", data.dsize) == 0;
+}
+
+static int traverse(struct tdb_context *tdb, TDB_DATA key, TDB_DATA data,
+ void *p)
+{
+ ok1(correct_key(key));
+ ok1(correct_data(data));
+ return 0;
+}
+
+int main(int argc, char *argv[])
+{
+ struct tdb_context *tdb;
+ TDB_DATA key, data;
+
+ plan_tests(13);
+ agent = prepare_external_agent();
+
+ tdb = tdb_open_ex("run-traverse-in-transaction.tdb",
+ 1024, TDB_CLEAR_IF_FIRST, O_CREAT|O_TRUNC|O_RDWR,
+ 0600, &taplogctx, NULL);
+ ok1(tdb);
+
+ key.dsize = strlen("hi");
+ key.dptr = discard_const_p(uint8_t, "hi");
+ data.dptr = discard_const_p(uint8_t, "world");
+ data.dsize = strlen("world");
+
+ ok1(tdb_store(tdb, key, data, TDB_INSERT) == 0);
+
+ ok1(external_agent_operation(agent, OPEN, tdb_name(tdb)) == SUCCESS);
+
+ ok1(tdb_transaction_start(tdb) == 0);
+ ok1(external_agent_operation(agent, TRANSACTION_START, tdb_name(tdb))
+ == WOULD_HAVE_BLOCKED);
+ tdb_traverse(tdb, traverse, NULL);
+
+ /* That should *not* release the transaction lock! */
+ ok1(external_agent_operation(agent, TRANSACTION_START, tdb_name(tdb))
+ == WOULD_HAVE_BLOCKED);
+ tdb_traverse_read(tdb, traverse, NULL);
+
+ /* That should *not* release the transaction lock! */
+ ok1(external_agent_operation(agent, TRANSACTION_START, tdb_name(tdb))
+ == WOULD_HAVE_BLOCKED);
+ ok1(tdb_transaction_commit(tdb) == 0);
+ /* Now we should be fine. */
+ ok1(external_agent_operation(agent, TRANSACTION_START, tdb_name(tdb))
+ == SUCCESS);
+
+ tdb_close(tdb);
+
+ return exit_status();
+}
diff --git a/test/run-wronghash-fail.c b/test/run-wronghash-fail.c
new file mode 100644
index 0000000..c44b0f5
--- /dev/null
+++ b/test/run-wronghash-fail.c
@@ -0,0 +1,121 @@
+#include "../common/tdb_private.h"
+#include "../common/io.c"
+#include "../common/tdb.c"
+#include "../common/lock.c"
+#include "../common/freelist.c"
+#include "../common/traverse.c"
+#include "../common/transaction.c"
+#include "../common/error.c"
+#include "../common/open.c"
+#include "../common/check.c"
+#include "../common/hash.c"
+#include "../common/mutex.c"
+#include "tap-interface.h"
+#include <stdlib.h>
+
+static void log_fn(struct tdb_context *tdb, enum tdb_debug_level level, const char *fmt, ...)
+{
+ unsigned int *count = tdb_get_logging_private(tdb);
+ if (strstr(fmt, "hash"))
+ (*count)++;
+}
+
+int main(int argc, char *argv[])
+{
+ struct tdb_context *tdb;
+ unsigned int log_count;
+ TDB_DATA d;
+ struct tdb_logging_context log_ctx = { log_fn, &log_count };
+
+ plan_tests(28);
+
+ /* Create with default hash. */
+ log_count = 0;
+ tdb = tdb_open_ex("run-wronghash-fail.tdb", 0, 0,
+ O_CREAT|O_RDWR|O_TRUNC, 0600, &log_ctx, NULL);
+ ok1(tdb);
+ ok1(log_count == 0);
+ d.dptr = discard_const_p(uint8_t, "Hello");
+ d.dsize = 5;
+ ok1(tdb_store(tdb, d, d, TDB_INSERT) == 0);
+ tdb_close(tdb);
+
+ /* Fail to open with different hash. */
+ tdb = tdb_open_ex("run-wronghash-fail.tdb", 0, 0, O_RDWR, 0,
+ &log_ctx, tdb_jenkins_hash);
+ ok1(!tdb);
+ ok1(log_count == 1);
+
+ /* Create with different hash. */
+ log_count = 0;
+ tdb = tdb_open_ex("run-wronghash-fail.tdb", 0, 0,
+ O_CREAT|O_RDWR|O_TRUNC,
+ 0600, &log_ctx, tdb_jenkins_hash);
+ ok1(tdb);
+ ok1(log_count == 0);
+ tdb_close(tdb);
+
+	/* Endianness should be no problem. */
+ log_count = 0;
+ tdb = tdb_open_ex("test/jenkins-le-hash.tdb", 0, 0, O_RDWR, 0,
+ &log_ctx, tdb_old_hash);
+ ok1(!tdb);
+ ok1(log_count == 1);
+
+ log_count = 0;
+ tdb = tdb_open_ex("test/jenkins-be-hash.tdb", 0, 0, O_RDWR, 0,
+ &log_ctx, tdb_old_hash);
+ ok1(!tdb);
+ ok1(log_count == 1);
+
+ log_count = 0;
+ /* Fail to open with old default hash. */
+ tdb = tdb_open_ex("run-wronghash-fail.tdb", 0, 0, O_RDWR, 0,
+ &log_ctx, tdb_old_hash);
+ ok1(!tdb);
+ ok1(log_count == 1);
+
+ log_count = 0;
+ tdb = tdb_open_ex("test/jenkins-le-hash.tdb", 0, 0, O_RDONLY,
+ 0, &log_ctx, tdb_jenkins_hash);
+ ok1(tdb);
+ ok1(log_count == 0);
+ ok1(tdb_check(tdb, NULL, NULL) == 0);
+ tdb_close(tdb);
+
+ log_count = 0;
+ tdb = tdb_open_ex("test/jenkins-be-hash.tdb", 0, 0, O_RDONLY,
+ 0, &log_ctx, tdb_jenkins_hash);
+ ok1(tdb);
+ ok1(log_count == 0);
+ ok1(tdb_check(tdb, NULL, NULL) == 0);
+ tdb_close(tdb);
+
+	/* It should open with the Jenkins hash if we don't specify one. */
+ log_count = 0;
+ tdb = tdb_open_ex("test/jenkins-le-hash.tdb", 0, 0, O_RDWR, 0,
+ &log_ctx, NULL);
+ ok1(tdb);
+ ok1(log_count == 0);
+ ok1(tdb_check(tdb, NULL, NULL) == 0);
+ tdb_close(tdb);
+
+ log_count = 0;
+ tdb = tdb_open_ex("test/jenkins-be-hash.tdb", 0, 0, O_RDWR, 0,
+ &log_ctx, NULL);
+ ok1(tdb);
+ ok1(log_count == 0);
+ ok1(tdb_check(tdb, NULL, NULL) == 0);
+ tdb_close(tdb);
+
+ log_count = 0;
+ tdb = tdb_open_ex("run-wronghash-fail.tdb", 0, 0, O_RDONLY,
+ 0, &log_ctx, NULL);
+ ok1(tdb);
+ ok1(log_count == 0);
+ ok1(tdb_check(tdb, NULL, NULL) == 0);
+ tdb_close(tdb);
+
+ return exit_status();
+}
diff --git a/test/run-zero-append.c b/test/run-zero-append.c
new file mode 100644
index 0000000..f9eba1b
--- /dev/null
+++ b/test/run-zero-append.c
@@ -0,0 +1,41 @@
+#include "../common/tdb_private.h"
+#include "../common/io.c"
+#include "../common/tdb.c"
+#include "../common/lock.c"
+#include "../common/freelist.c"
+#include "../common/traverse.c"
+#include "../common/transaction.c"
+#include "../common/error.c"
+#include "../common/open.c"
+#include "../common/check.c"
+#include "../common/hash.c"
+#include "../common/mutex.c"
+#include "tap-interface.h"
+#include <stdlib.h>
+#include "logging.h"
+
+int main(int argc, char *argv[])
+{
+ struct tdb_context *tdb;
+ TDB_DATA key, data;
+
+ plan_tests(4);
+ tdb = tdb_open_ex(NULL, 1024, TDB_INTERNAL, O_CREAT|O_TRUNC|O_RDWR,
+ 0600, &taplogctx, NULL);
+ ok1(tdb);
+
+	/* Tickle a bug in appending a zero-length buffer to a zero-length one. */
+ key.dsize = strlen("hi");
+ key.dptr = discard_const_p(uint8_t, "hi");
+ data.dptr = discard_const_p(uint8_t, "world");
+ data.dsize = 0;
+
+ ok1(tdb_append(tdb, key, data) == 0);
+ ok1(tdb_append(tdb, key, data) == 0);
+ data = tdb_fetch(tdb, key);
+ ok1(data.dsize == 0);
+ tdb_close(tdb);
+ free(data.dptr);
+
+ return exit_status();
+}
diff --git a/test/run.c b/test/run.c
new file mode 100644
index 0000000..c744c4d
--- /dev/null
+++ b/test/run.c
@@ -0,0 +1,50 @@
+#include "../common/tdb_private.h"
+#include "../common/io.c"
+#include "../common/tdb.c"
+#include "../common/lock.c"
+#include "../common/freelist.c"
+#include "../common/traverse.c"
+#include "../common/transaction.c"
+#include "../common/error.c"
+#include "../common/open.c"
+#include "../common/check.c"
+#include "../common/hash.c"
+#include "../common/mutex.c"
+#include "tap-interface.h"
+#include <stdlib.h>
+#include "logging.h"
+
+int main(int argc, char *argv[])
+{
+ struct tdb_context *tdb;
+ TDB_DATA key, data;
+
+ plan_tests(10);
+ tdb = tdb_open_ex("run.tdb", 1024, TDB_CLEAR_IF_FIRST,
+ O_CREAT|O_TRUNC|O_RDWR, 0600, &taplogctx, NULL);
+
+ ok1(tdb);
+ key.dsize = strlen("hi");
+ key.dptr = discard_const_p(uint8_t, "hi");
+ data.dsize = strlen("world");
+ data.dptr = discard_const_p(uint8_t, "world");
+
+ ok1(tdb_store(tdb, key, data, TDB_MODIFY) < 0);
+ ok1(tdb_error(tdb) == TDB_ERR_NOEXIST);
+ ok1(tdb_store(tdb, key, data, TDB_INSERT) == 0);
+ ok1(tdb_store(tdb, key, data, TDB_INSERT) < 0);
+ ok1(tdb_error(tdb) == TDB_ERR_EXISTS);
+ ok1(tdb_store(tdb, key, data, TDB_MODIFY) == 0);
+
+ data = tdb_fetch(tdb, key);
+ ok1(data.dsize == strlen("world"));
+ ok1(memcmp(data.dptr, "world", strlen("world")) == 0);
+ free(data.dptr);
+
+ key.dsize++;
+ data = tdb_fetch(tdb, key);
+ ok1(data.dptr == NULL);
+ tdb_close(tdb);
+
+ return exit_status();
+}
diff --git a/test/rwlock-be.tdb b/test/rwlock-be.tdb
new file mode 100644
index 0000000..45b5f09
--- /dev/null
+++ b/test/rwlock-be.tdb
Binary files differ
diff --git a/test/rwlock-le.tdb b/test/rwlock-le.tdb
new file mode 100644
index 0000000..45b5f09
--- /dev/null
+++ b/test/rwlock-le.tdb
Binary files differ
diff --git a/test/tap-interface.h b/test/tap-interface.h
new file mode 100644
index 0000000..8f742d8
--- /dev/null
+++ b/test/tap-interface.h
@@ -0,0 +1,58 @@
+/*
+ Unix SMB/CIFS implementation.
+ Simplistic implementation of tap interface.
+
+ Copyright (C) Rusty Russell 2012
+
+  ** NOTE! The following LGPL license applies to the tdb
+  ** library. This does NOT imply that all of Samba is released
+  ** under the LGPL
+
+ This library is free software; you can redistribute it and/or
+ modify it under the terms of the GNU Lesser General Public
+ License as published by the Free Software Foundation; either
+ version 3 of the License, or (at your option) any later version.
+
+ This library is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ Lesser General Public License for more details.
+
+ You should have received a copy of the GNU Lesser General Public
+ License along with this library; if not, see <http://www.gnu.org/licenses/>.
+*/
+#include <stdio.h>
+
+#ifndef __location__
+#define __TAP_STRING_LINE1__(s) #s
+#define __TAP_STRING_LINE2__(s) __TAP_STRING_LINE1__(s)
+#define __TAP_STRING_LINE3__ __TAP_STRING_LINE2__(__LINE__)
+#define __location__ __FILE__ ":" __TAP_STRING_LINE3__
+#endif
+
+#define plan_tests(num)
+#define fail(...) do { \
+ fprintf(stderr, __VA_ARGS__); \
+ fprintf(stderr, "\n"); \
+ fflush(stderr); \
+ exit(1); \
+} while(0)
+#define diag(...) do { \
+ fprintf(stdout, __VA_ARGS__); \
+ fprintf(stdout, "\n"); \
+ fflush(stdout); \
+} while(0)
+#define pass(...) do { \
+ fprintf(stdout, "."); \
+ fflush(stdout); \
+} while(0)
+#define ok(e, ...) do { \
+ if (e) { \
+ pass(); \
+ } else { \
+ fail(__VA_ARGS__); \
+ } \
+} while(0)
+#define ok1(e) ok((e), "%s:%s", __location__, #e)
+#define skip(n, ...) diag(__VA_ARGS__)
+#define exit_status() 0
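+
+/* Hypothetical example of a test using this interface:
+ *
+ *	int main(void)
+ *	{
+ *		plan_tests(2);
+ *		ok1(1 + 1 == 2);
+ *		ok(2 > 1, "%s: two is greater than one", __location__);
+ *		return exit_status();
+ *	}
+ */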
diff --git a/test/tap-to-subunit.h b/test/tap-to-subunit.h
new file mode 100644
index 0000000..a5cf74f
--- /dev/null
+++ b/test/tap-to-subunit.h
@@ -0,0 +1,155 @@
+#ifndef TAP_TO_SUBUNIT_H
+#define TAP_TO_SUBUNIT_H
+/*
+ * tap-style wrapper for subunit.
+ *
+ * Copyright (c) 2011 Rusty Russell
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+#include "replace.h"
+
+/**
+ * plan_tests - announce the number of tests you plan to run
+ * @tests: the number of tests
+ *
+ * This should be the first call in your test program: it allows tracing
+ * of failures which mean that not all tests are run.
+ *
+ * If you don't know how many tests will actually be run, assume all of them
+ * and use skip() if you don't actually run some tests.
+ *
+ * Example:
+ * plan_tests(13);
+ */
+void plan_tests(unsigned int tests);
+
+/**
+ * ok1 - Simple conditional test
+ * @e: the expression which we expect to be true.
+ *
+ * This is the simplest kind of test: if the expression is true, the
+ * test passes. The name of the test which is printed will simply be
+ * file name, line number, and the expression itself.
+ *
+ * Example:
+ * ok1(somefunc() == 1);
+ */
+# define ok1(e) ((e) ? \
+ _gen_result(1, __func__, __FILE__, __LINE__, "%s", #e) : \
+ _gen_result(0, __func__, __FILE__, __LINE__, "%s", #e))
+
+/**
+ * ok - Conditional test with a name
+ * @e: the expression which we expect to be true.
+ * @...: the printf-style name of the test.
+ *
+ * If the expression is true, the test passes. The name of the test will be
+ * the filename, line number, and the printf-style string. This can be clearer
+ * than simply the expression itself.
+ *
+ * Example:
+ * ok1(somefunc() == 1);
+ * ok(somefunc() == 0, "Second somefunc() should fail");
+ */
+# define ok(e, ...) ((e) ? \
+ _gen_result(1, __func__, __FILE__, __LINE__, \
+ __VA_ARGS__) : \
+ _gen_result(0, __func__, __FILE__, __LINE__, \
+ __VA_ARGS__))
+
+/**
+ * pass - Note that a test passed
+ * @...: the printf-style name of the test.
+ *
+ * For complicated code paths, it can be easiest to simply call pass() in one
+ * branch and fail() in another.
+ *
+ * Example:
+ * int x = somefunc();
+ * if (x > 0)
+ * pass("somefunc() returned a valid value");
+ * else
+ * fail("somefunc() returned an invalid value");
+ */
+# define pass(...) ok(1, __VA_ARGS__)
+
+/**
+ * fail - Note that a test failed
+ * @...: the printf-style name of the test.
+ *
+ * For complicated code paths, it can be easiest to simply call pass() in one
+ * branch and fail() in another.
+ */
+# define fail(...) ok(0, __VA_ARGS__)
+
+unsigned int _gen_result(int, const char *, const char *, unsigned int,
+ const char *, ...) PRINTF_ATTRIBUTE(5, 6);
+
+/**
+ * diag - print a diagnostic message (use instead of printf/fprintf)
+ * @fmt: the format of the printf-style message
+ *
+ * diag ensures that the output will not be considered to be a test
+ * result by the TAP test harness. It will append '\n' for you.
+ *
+ * Example:
+ * diag("Now running complex tests");
+ */
+void diag(const char *fmt, ...) PRINTF_ATTRIBUTE(1, 2);
+
+/**
+ * skip - print a diagnostic message (use instead of printf/fprintf)
+ * @n: number of tests you're skipping.
+ * @fmt: the format of the reason you're skipping the tests.
+ *
+ * Sometimes tests cannot be run because the test system lacks some feature:
+ * you should explicitly document that you're skipping tests using skip().
+ *
+ * From the Test::More documentation:
+ * If it's something the user might not be able to do, use SKIP. This
+ * includes optional modules that aren't installed, running under an OS that
+ * doesn't have some feature (like fork() or symlinks), or maybe you need an
+ * Internet connection and one isn't available.
+ *
+ * Example:
+ * #ifdef HAVE_SOME_FEATURE
+ * ok1(somefunc());
+ * #else
+ * skip(1, "Don't have SOME_FEATURE");
+ * #endif
+ */
+void skip(unsigned int n, const char *fmt, ...) PRINTF_ATTRIBUTE(2, 3);
+
+/**
+ * exit_status - the value that main should return.
+ *
+ * For maximum compatibility your test program should return a particular exit
+ * code (i.e. 0 if all tests were run, and every test which was expected to
+ * succeed succeeded).
+ *
+ * Example:
+ * exit(exit_status());
+ */
+int exit_status(void);
+#endif /* TAP_TO_SUBUNIT_H */
diff --git a/test/tdb.corrupt b/test/tdb.corrupt
new file mode 100644
index 0000000..83d6677
--- /dev/null
+++ b/test/tdb.corrupt
Binary files differ
diff --git a/third_party/waf/wafadmin/3rdparty/ParallelDebug.py b/third_party/waf/wafadmin/3rdparty/ParallelDebug.py
new file mode 100644
index 0000000..0ff580e
--- /dev/null
+++ b/third_party/waf/wafadmin/3rdparty/ParallelDebug.py
@@ -0,0 +1,297 @@
+#! /usr/bin/env python
+# encoding: utf-8
+# Thomas Nagy, 2007-2010 (ita)
+
+"""
+Debugging helpers for parallel compilation: outputs an SVG file
+in the build directory.
+"""
+
+import os, time, sys, threading
+try: from Queue import Queue
+except: from queue import Queue
+import Runner, Options, Utils, Task, Logs
+from Constants import *
+
+#import random
+#random.seed(100)
+
+def set_options(opt):
+ opt.add_option('--dtitle', action='store', default='Parallel build representation for %r' % ' '.join(sys.argv),
+ help='title for the svg diagram', dest='dtitle')
+ opt.add_option('--dwidth', action='store', type='int', help='diagram width', default=1000, dest='dwidth')
+ opt.add_option('--dtime', action='store', type='float', help='recording interval in seconds', default=0.009, dest='dtime')
+ opt.add_option('--dband', action='store', type='int', help='band width', default=22, dest='dband')
+ opt.add_option('--dmaxtime', action='store', type='float', help='maximum time, for drawing fair comparisons', default=0, dest='dmaxtime')
+
+# red #ff4d4d
+# green #4da74d
+# lila #a751ff
+
+color2code = {
+ 'GREEN' : '#4da74d',
+ 'YELLOW' : '#fefe44',
+ 'PINK' : '#a751ff',
+ 'RED' : '#cc1d1d',
+ 'BLUE' : '#6687bb',
+ 'CYAN' : '#34e2e2',
+
+}
+
+mp = {}
+info = [] # list of (text,color)
+
+def map_to_color(name):
+ if name in mp:
+ return mp[name]
+ try:
+ cls = Task.TaskBase.classes[name]
+ except KeyError:
+ return color2code['RED']
+ if cls.color in mp:
+ return mp[cls.color]
+ if cls.color in color2code:
+ return color2code[cls.color]
+ return color2code['RED']
+
+def loop(self):
+ while 1:
+ tsk=Runner.TaskConsumer.ready.get()
+ tsk.master.set_running(1, id(threading.currentThread()), tsk)
+ Runner.process_task(tsk)
+ tsk.master.set_running(-1, id(threading.currentThread()), tsk)
+Runner.TaskConsumer.loop = loop
+
+
+old_start = Runner.Parallel.start
+def do_start(self):
+ try:
+ Options.options.dband
+ except AttributeError:
+ raise ValueError('use def options(opt): opt.load("parallel_debug")!')
+
+ self.taskinfo = Queue()
+ old_start(self)
+ process_colors(self)
+Runner.Parallel.start = do_start
+
+def set_running(self, by, i, tsk):
+ self.taskinfo.put( (i, id(tsk), time.time(), tsk.__class__.__name__, self.processed, self.count, by) )
+Runner.Parallel.set_running = set_running
+
+def name2class(name):
+ return name.replace(' ', '_').replace('.', '_')
+
+def process_colors(producer):
+ # first, cast the parameters
+ tmp = []
+ try:
+ while True:
+ tup = producer.taskinfo.get(False)
+ tmp.append(list(tup))
+ except:
+ pass
+
+ try:
+ ini = float(tmp[0][2])
+ except:
+ return
+
+ if not info:
+ seen = []
+ for x in tmp:
+ name = x[3]
+ if not name in seen:
+ seen.append(name)
+ else:
+ continue
+
+ info.append((name, map_to_color(name)))
+ info.sort(key=lambda x: x[0])
+
+ thread_count = 0
+ acc = []
+ for x in tmp:
+ thread_count += x[6]
+ acc.append("%d %d %f %r %d %d %d" % (x[0], x[1], x[2] - ini, x[3], x[4], x[5], thread_count))
+ f = open('pdebug.dat', 'w')
+ #Utils.write('\n'.join(acc))
+ f.write('\n'.join(acc))
+
+ tmp = [lst[:2] + [float(lst[2]) - ini] + lst[3:] for lst in tmp]
+
+ st = {}
+ for l in tmp:
+ if not l[0] in st:
+ st[l[0]] = len(st.keys())
+ tmp = [ [st[lst[0]]] + lst[1:] for lst in tmp ]
+ THREAD_AMOUNT = len(st.keys())
+
+ st = {}
+ for l in tmp:
+ if not l[1] in st:
+ st[l[1]] = len(st.keys())
+ tmp = [ [lst[0]] + [st[lst[1]]] + lst[2:] for lst in tmp ]
+
+
+ BAND = Options.options.dband
+
+ seen = {}
+ acc = []
+ for x in range(len(tmp)):
+ line = tmp[x]
+ id = line[1]
+
+ if id in seen:
+ continue
+ seen[id] = True
+
+ begin = line[2]
+ thread_id = line[0]
+ for y in range(x + 1, len(tmp)):
+ line = tmp[y]
+ if line[1] == id:
+ end = line[2]
+ #print id, thread_id, begin, end
+ #acc.append( ( 10*thread_id, 10*(thread_id+1), 10*begin, 10*end ) )
+ acc.append( (BAND * begin, BAND*thread_id, BAND*end - BAND*begin, BAND, line[3]) )
+ break
+
+ if Options.options.dmaxtime < 0.1:
+ gwidth = 1
+ for x in tmp:
+ m = BAND * x[2]
+ if m > gwidth:
+ gwidth = m
+ else:
+ gwidth = BAND * Options.options.dmaxtime
+
+ ratio = float(Options.options.dwidth) / gwidth
+ gwidth = Options.options.dwidth
+
+ gheight = BAND * (THREAD_AMOUNT + len(info) + 1.5)
+
+ out = []
+
+ out.append("""<?xml version=\"1.0\" encoding=\"UTF-8\" standalone=\"no\"?>
+<!DOCTYPE svg PUBLIC \"-//W3C//DTD SVG 1.0//EN\"
+\"http://www.w3.org/TR/2001/REC-SVG-20010904/DTD/svg10.dtd\">
+<svg xmlns=\"http://www.w3.org/2000/svg\" xmlns:xlink=\"http://www.w3.org/1999/xlink\" version=\"1.0\"
+ x=\"%r\" y=\"%r\" width=\"%r\" height=\"%r\"
+ id=\"svg602\" xml:space=\"preserve\">
+
+<style type='text/css' media='screen'>
+ g.over rect { stroke:#FF0000; fill-opacity:0.4 }
+</style>
+
+<script type='text/javascript'><![CDATA[
+ var svg = document.getElementsByTagName('svg')[0];
+ var svgNS = svg.getAttribute('xmlns');
+ svg.addEventListener('mouseover',function(e){
+ var g = e.target.parentNode;
+ var x = document.getElementById('r_'+g.id);
+ if (x) {
+ g.setAttribute('class', g.getAttribute('class')+' over');
+ x.setAttribute('class', x.getAttribute('class')+' over');
+ showInfo(e, g.id);
+ }
+ },false);
+ svg.addEventListener('mouseout',function(e){
+ var g = e.target.parentNode;
+ var x = document.getElementById('r_'+g.id);
+ if (x) {
+ g.setAttribute('class',g.getAttribute('class').replace(' over',''));
+ x.setAttribute('class',x.getAttribute('class').replace(' over',''));
+ hideInfo(e);
+ }
+ },false);
+
+function showInfo(evt, txt) {
+ tooltip = document.getElementById('tooltip');
+
+ var t = document.getElementById('tooltiptext');
+ t.firstChild.data = txt;
+
+ var x = evt.clientX+10;
+ if (x > 200) { x -= t.getComputedTextLength() + 16; }
+ var y = evt.clientY+30;
+ tooltip.setAttribute("transform", "translate(" + x + "," + y + ")");
+ tooltip.setAttributeNS(null,"visibility","visible");
+
+ var r = document.getElementById('tooltiprect');
+ r.setAttribute('width', t.getComputedTextLength()+6)
+}
+
+
+function hideInfo(evt) {
+ tooltip = document.getElementById('tooltip');
+ tooltip.setAttributeNS(null,"visibility","hidden");
+}
+
+]]></script>
+
+<!-- inkscape requires a big rectangle or it will not export the pictures properly -->
+<rect
+ x='%r' y='%r'
+ width='%r' height='%r' z-index='10'
+ style=\"font-size:10;fill:#ffffff;fill-opacity:0.01;fill-rule:evenodd;stroke:#ffffff;\"
+ />\n
+
+""" % (0, 0, gwidth + 4, gheight + 4, 0, 0, gwidth + 4, gheight + 4))
+
+ # main title
+ if Options.options.dtitle:
+ out.append("""<text x="%d" y="%d" style="font-size:15px; text-anchor:middle; font-style:normal;font-weight:normal;fill:#000000;fill-opacity:1;stroke:none;stroke-width:1px;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1;font-family:Bitstream Vera Sans">%s</text>
+""" % (gwidth/2, gheight - 5, Options.options.dtitle))
+
+ # the rectangles
+ groups = {}
+ for (x, y, w, h, clsname) in acc:
+ try:
+ groups[clsname].append((x, y, w, h))
+ except:
+ groups[clsname] = [(x, y, w, h)]
+
+ for cls in groups:
+
+ out.append("<g id='%s'>\n" % name2class(cls))
+
+ for (x, y, w, h) in groups[cls]:
+ out.append(""" <rect
+ x='%r' y='%r'
+ width='%r' height='%r' z-index='11'
+ style=\"font-size:10;fill:%s;fill-rule:evenodd;stroke:#000000;stroke-width:0.2px;\"
+ />\n""" % (2 + x*ratio, 2 + y, w*ratio, h, map_to_color(cls)))
+
+ out.append("</g>\n")
+
+ # output the caption
+ cnt = THREAD_AMOUNT
+
+ for (text, color) in info:
+ # caption box
+ b = BAND/2
+ out.append("""<g id='r_%s'><rect
+ x='%r' y='%r'
+ width='%r' height='%r'
+ style=\"font-size:10;fill:%s;fill-rule:evenodd;stroke:#000000;stroke-width:0.2px;\"
+ />\n""" % (name2class(text), 2 + BAND, 5 + (cnt + 0.5) * BAND, b, b, color))
+
+ # caption text
+ out.append("""<text
+ style="font-size:12px;font-style:normal;font-weight:normal;fill:#000000;fill-opacity:1;stroke:none;stroke-width:1px;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1;font-family:Bitstream Vera Sans"
+ x="%r" y="%d">%s</text></g>\n""" % (2 + 2 * BAND, 5 + (cnt + 0.5) * BAND + 10, text))
+ cnt += 1
+
+ out.append("""
+<g transform="translate(0,0)" visibility="hidden" id="tooltip">
+ <rect id="tooltiprect" y="-15" x="-3" width="1" height="20" style="stroke:black;fill:#edefc2;stroke-width:1"/>
+ <text id="tooltiptext" style="font-family:Arial; font-size:12;fill:black;"> </text>
+</g>""")
+
+ out.append("\n</svg>")
+
+ #node = producer.bld.path.make_node('pdebug.svg')
+ f = open('pdebug.svg', 'w')
+ f.write("".join(out))
diff --git a/third_party/waf/wafadmin/3rdparty/batched_cc.py b/third_party/waf/wafadmin/3rdparty/batched_cc.py
new file mode 100644
index 0000000..7ed569c
--- /dev/null
+++ b/third_party/waf/wafadmin/3rdparty/batched_cc.py
@@ -0,0 +1,182 @@
+#!/usr/bin/env python
+# encoding: utf-8
+# Thomas Nagy, 2006 (ita)
+
+"""
+Batched builds - compile faster
+instead of compiling object files one by one, c/c++ compilers are often able to compile at once:
+cc -c ../file1.c ../file2.c ../file3.c
+
+Files are output on the directory where the compiler is called, and dependencies are more difficult
+to track (do not run the command on all source files if only one file changes)
+
+As such, we do as if the files were compiled one by one, but no command is actually run:
+replace each cc/cpp Task by a TaskSlave
+A new task called TaskMaster collects the signatures from each slave and finds out the command-line
+to run.
+
+To set this up, the method ccroot::create_task is replaced by a new version, to enable batched builds
+it is only necessary to import this module in the configuration (no other change required)
+"""
+
+MAX_BATCH = 50
+MAXPARALLEL = False
+
+EXT_C = ['.c', '.cc', '.cpp', '.cxx']
+
+import os, threading
+import TaskGen, Task, ccroot, Build, Logs
+from TaskGen import extension, feature, before
+from Constants import *
+
+cc_str = '${CC} ${CCFLAGS} ${CPPFLAGS} ${_CCINCFLAGS} ${_CCDEFFLAGS} -c ${SRCLST}'
+cc_fun = Task.compile_fun_noshell('batched_cc', cc_str)[0]
+
+cxx_str = '${CXX} ${CXXFLAGS} ${CPPFLAGS} ${_CXXINCFLAGS} ${_CXXDEFFLAGS} -c ${SRCLST}'
+cxx_fun = Task.compile_fun_noshell('batched_cxx', cxx_str)[0]
+
+count = 70000
+class batch_task(Task.Task):
+ color = 'RED'
+
+ after = 'cc cxx'
+ before = 'cc_link cxx_link static_link'
+
+ def __str__(self):
+ return '(batch compilation for %d slaves)\n' % len(self.slaves)
+
+ def __init__(self, *k, **kw):
+ Task.Task.__init__(self, *k, **kw)
+ self.slaves = []
+ self.inputs = []
+ self.hasrun = 0
+
+ global count
+ count += 1
+ self.idx = count
+
+ def add_slave(self, slave):
+ self.slaves.append(slave)
+ self.set_run_after(slave)
+
+ def runnable_status(self):
+ for t in self.run_after:
+ if not t.hasrun:
+ return ASK_LATER
+
+ for t in self.slaves:
+ #if t.executed:
+ if t.hasrun != SKIPPED:
+ return RUN_ME
+
+ return SKIP_ME
+
+ def run(self):
+ outputs = []
+ self.outputs = []
+
+ srclst = []
+ slaves = []
+ for t in self.slaves:
+ if t.hasrun != SKIPPED:
+ slaves.append(t)
+ srclst.append(t.inputs[0].abspath(self.env))
+
+ self.env.SRCLST = srclst
+ self.cwd = slaves[0].inputs[0].parent.abspath(self.env)
+
+ env = self.env
+ app = env.append_unique
+ cpppath_st = env['CPPPATH_ST']
+		env._CCINCFLAGS = env._CXXINCFLAGS = []
+
+ # local flags come first
+ # set the user-defined includes paths
+ for i in env['INC_PATHS']:
+ app('_CCINCFLAGS', cpppath_st % i.abspath())
+ app('_CXXINCFLAGS', cpppath_st % i.abspath())
+ app('_CCINCFLAGS', cpppath_st % i.abspath(env))
+ app('_CXXINCFLAGS', cpppath_st % i.abspath(env))
+
+ # set the library include paths
+ for i in env['CPPPATH']:
+ app('_CCINCFLAGS', cpppath_st % i)
+ app('_CXXINCFLAGS', cpppath_st % i)
+
+ if self.slaves[0].__class__.__name__ == 'cc':
+ ret = cc_fun(self)
+ else:
+ ret = cxx_fun(self)
+
+ if ret:
+ return ret
+
+ for t in slaves:
+ t.old_post_run()
+
+from TaskGen import extension, feature, after
+
+import cc, cxx
+def wrap(fun):
+ def foo(self, node):
+ # we cannot control the extension, this sucks
+ self.obj_ext = '.o'
+
+ task = fun(self, node)
+ if not getattr(self, 'masters', None):
+ self.masters = {}
+ self.allmasters = []
+
+ if not node.parent.id in self.masters:
+ m = self.masters[node.parent.id] = self.master = self.create_task('batch')
+ self.allmasters.append(m)
+ else:
+ m = self.masters[node.parent.id]
+ if len(m.slaves) > MAX_BATCH:
+ m = self.masters[node.parent.id] = self.master = self.create_task('batch')
+ self.allmasters.append(m)
+
+ m.add_slave(task)
+ return task
+ return foo
+
+c_hook = wrap(cc.c_hook)
+extension(cc.EXT_CC)(c_hook)
+
+cxx_hook = wrap(cxx.cxx_hook)
+extension(cxx.EXT_CXX)(cxx_hook)
+
+
+@feature('cprogram', 'cshlib', 'cstaticlib')
+@after('apply_link')
+def link_after_masters(self):
+ if getattr(self, 'allmasters', None):
+ for m in self.allmasters:
+ self.link_task.set_run_after(m)
+
+for c in ['cc', 'cxx']:
+ t = Task.TaskBase.classes[c]
+ def run(self):
+ pass
+
+ def post_run(self):
+ #self.executed=1
+ pass
+
+ def can_retrieve_cache(self):
+ if self.old_can_retrieve_cache():
+ for m in self.generator.allmasters:
+ try:
+ m.slaves.remove(self)
+ except ValueError:
+ pass #this task wasn't included in that master
+ return 1
+ else:
+ return None
+
+ setattr(t, 'oldrun', t.__dict__['run'])
+ setattr(t, 'run', run)
+ setattr(t, 'old_post_run', t.post_run)
+ setattr(t, 'post_run', post_run)
+ setattr(t, 'old_can_retrieve_cache', t.can_retrieve_cache)
+ setattr(t, 'can_retrieve_cache', can_retrieve_cache)
diff --git a/third_party/waf/wafadmin/3rdparty/boost.py b/third_party/waf/wafadmin/3rdparty/boost.py
new file mode 100644
index 0000000..1cbbf7e
--- /dev/null
+++ b/third_party/waf/wafadmin/3rdparty/boost.py
@@ -0,0 +1,342 @@
+#!/usr/bin/env python
+# encoding: utf-8
+#
+# partially based on boost.py written by Gernot Vormayr
+# written by Ruediger Sonderfeld <ruediger@c-plusplus.de>, 2008
+# modified by Bjoern Michaelsen, 2008
+# modified by Luca Fossati, 2008
+# rewritten for waf 1.5.1, Thomas Nagy, 2008
+#
+#def set_options(opt):
+# opt.tool_options('boost')
+# # ...
+#
+#def configure(conf):
+# # ... (e.g. conf.check_tool('g++'))
+# conf.check_tool('boost')
+# conf.check_boost(lib='signals filesystem', static='onlystatic', score_version=(-1000, 1000), tag_minscore=1000)
+#
+#def build(bld):
+# bld(source='main.c', target='bar', uselib="BOOST BOOST_SYSTEM")
+#
+#ISSUES:
+# * find_includes should be called only once!
+# * support mandatory
+
+######## boost update ###########
+## ITA: * the method get_boost_version_number does work
+## * the rest of the code has not really been tried
+# * make certain a demo is provided (in demos/adv for example)
+
+# TODO: bad and underdocumented code -> boost.py will be removed in waf 1.6 to be rewritten later
+
+import os.path, glob, types, re, sys
+import Configure, config_c, Options, Utils, Logs
+from Logs import warn, debug
+from Configure import conf
+
+boost_code = '''
+#include <iostream>
+#include <boost/version.hpp>
+int main() { std::cout << BOOST_VERSION << std::endl; }
+'''
+
+boost_libpath = ['/usr/lib', '/usr/local/lib', '/opt/local/lib', '/sw/lib', '/lib']
+boost_cpppath = ['/usr/include', '/usr/local/include', '/opt/local/include', '/sw/include']
+
+STATIC_NOSTATIC = 'nostatic'
+STATIC_BOTH = 'both'
+STATIC_ONLYSTATIC = 'onlystatic'
+
+is_versiontag = re.compile('^\d+_\d+_?\d*$')
+is_threadingtag = re.compile('^mt$')
+is_abitag = re.compile('^[sgydpn]+$')
+is_toolsettag = re.compile('^(acc|borland|como|cw|dmc|darwin|gcc|hp_cxx|intel|kylix|vc|mgw|qcc|sun|vacpp)\d*$')
+is_pythontag=re.compile('^py[0-9]{2}$')
+
+def set_options(opt):
+ opt.add_option('--boost-includes', type='string', default='', dest='boostincludes', help='path to the boost directory where the includes are e.g. /usr/local/include/boost-1_35')
+ opt.add_option('--boost-libs', type='string', default='', dest='boostlibs', help='path to the directory where the boost libs are e.g. /usr/local/lib')
+
+def string_to_version(s):
+ version = s.split('.')
+ if len(version) < 3: return 0
+ return int(version[0])*100000 + int(version[1])*100 + int(version[2])
+
+def version_string(version):
+ major = version / 100000
+ minor = version / 100 % 1000
+ minor_minor = version % 100
+ if minor_minor == 0:
+ return "%d_%d" % (major, minor)
+ else:
+ return "%d_%d_%d" % (major, minor, minor_minor)
+
+def libfiles(lib, pattern, lib_paths):
+ result = []
+ for lib_path in lib_paths:
+ libname = pattern % ('boost_%s[!_]*' % lib)
+ result += glob.glob(os.path.join(lib_path, libname))
+ return result
+
+@conf
+def get_boost_version_number(self, dir):
+ """silently retrieve the boost version number"""
+ try:
+ return self.run_c_code(compiler='cxx', code=boost_code, includes=dir, execute=1, env=self.env.copy(), type='cprogram', compile_mode='cxx', compile_filename='test.cpp')
+ except Configure.ConfigurationError, e:
+ return -1
+
+def set_default(kw, var, val):
+ if not var in kw:
+ kw[var] = val
+
+def tags_score(tags, kw):
+ """
+ checks library tags
+
+ see http://www.boost.org/doc/libs/1_35_0/more/getting_started/unix-variants.html 6.1
+ """
+ score = 0
+ needed_tags = {
+ 'threading': kw['tag_threading'],
+ 'abi': kw['tag_abi'],
+ 'toolset': kw['tag_toolset'],
+ 'version': kw['tag_version'],
+ 'python': kw['tag_python']
+ }
+
+ if kw['tag_toolset'] is None:
+ v = kw['env']
+ toolset = v['CXX_NAME']
+ if v['CXX_VERSION']:
+ version_no = v['CXX_VERSION'].split('.')
+ toolset += version_no[0]
+ if len(version_no) > 1:
+ toolset += version_no[1]
+ needed_tags['toolset'] = toolset
+
+ found_tags = {}
+ for tag in tags:
+ if is_versiontag.match(tag): found_tags['version'] = tag
+ if is_threadingtag.match(tag): found_tags['threading'] = tag
+ if is_abitag.match(tag): found_tags['abi'] = tag
+ if is_toolsettag.match(tag): found_tags['toolset'] = tag
+ if is_pythontag.match(tag): found_tags['python'] = tag
+
+ for tagname in needed_tags.iterkeys():
+ if needed_tags[tagname] is not None and tagname in found_tags:
+ if re.compile(needed_tags[tagname]).match(found_tags[tagname]):
+ score += kw['score_' + tagname][0]
+ else:
+ score += kw['score_' + tagname][1]
+ return score
+
+@conf
+def validate_boost(self, kw):
+ ver = kw.get('version', '')
+
+ for x in 'min_version max_version version'.split():
+ set_default(kw, x, ver)
+
+ set_default(kw, 'lib', '')
+ kw['lib'] = Utils.to_list(kw['lib'])
+
+ set_default(kw, 'env', self.env)
+
+ set_default(kw, 'libpath', boost_libpath)
+ set_default(kw, 'cpppath', boost_cpppath)
+
+ for x in 'tag_threading tag_version tag_toolset'.split():
+ set_default(kw, x, None)
+ set_default(kw, 'tag_abi', '^[^d]*$')
+
+ set_default(kw, 'python', str(sys.version_info[0]) + str(sys.version_info[1]) )
+ set_default(kw, 'tag_python', '^py' + kw['python'] + '$')
+
+ set_default(kw, 'score_threading', (10, -10))
+ set_default(kw, 'score_abi', (10, -10))
+ set_default(kw, 'score_python', (10,-10))
+ set_default(kw, 'score_toolset', (1, -1))
+ set_default(kw, 'score_version', (100, -100))
+
+ set_default(kw, 'score_min', 0)
+ set_default(kw, 'static', STATIC_NOSTATIC)
+ set_default(kw, 'found_includes', False)
+ set_default(kw, 'min_score', 0)
+
+ set_default(kw, 'errmsg', 'not found')
+ set_default(kw, 'okmsg', 'ok')
+
+@conf
+def find_boost_includes(self, kw):
+ """
+ check every path in kw['cpppath'] for subdir
+ that either starts with boost- or is named boost.
+
+ Then the version is checked and selected accordingly to
+ min_version/max_version. The highest possible version number is
+ selected!
+
+ If no versiontag is set the versiontag is set accordingly to the
+ selected library and CPPPATH_BOOST is set.
+ """
+ boostPath = getattr(Options.options, 'boostincludes', '')
+ if boostPath:
+ boostPath = [os.path.normpath(os.path.expandvars(os.path.expanduser(boostPath)))]
+ else:
+ boostPath = Utils.to_list(kw['cpppath'])
+
+ min_version = string_to_version(kw.get('min_version', ''))
+ max_version = string_to_version(kw.get('max_version', '')) or (sys.maxint - 1)
+
+ version = 0
+ for include_path in boostPath:
+ boost_paths = [p for p in glob.glob(os.path.join(include_path, 'boost*')) if os.path.isdir(p)]
+ debug('BOOST Paths: %r' % boost_paths)
+ for path in boost_paths:
+ pathname = os.path.split(path)[-1]
+ ret = -1
+ if pathname == 'boost':
+ path = include_path
+ ret = self.get_boost_version_number(path)
+ elif pathname.startswith('boost-'):
+ ret = self.get_boost_version_number(path)
+ ret = int(ret)
+
+ if ret != -1 and ret >= min_version and ret <= max_version and ret > version:
+ boost_path = path
+ version = ret
+ if not version:
+ self.fatal('boost headers not found! (required version min: %s max: %s)'
+ % (kw['min_version'], kw['max_version']))
+ return False
+
+ found_version = version_string(version)
+ versiontag = '^' + found_version + '$'
+ if kw['tag_version'] is None:
+ kw['tag_version'] = versiontag
+ elif kw['tag_version'] != versiontag:
+ warn('boost header version %r and tag_version %r do not match!' % (versiontag, kw['tag_version']))
+ env = self.env
+ env['CPPPATH_BOOST'] = boost_path
+ env['BOOST_VERSION'] = found_version
+ self.found_includes = 1
+ ret = 'Version %s (%s)' % (found_version, boost_path)
+ return ret
+
+@conf
+def find_boost_library(self, lib, kw):
+
+ def find_library_from_list(lib, files):
+ lib_pattern = re.compile('.*boost_(.*?)\..*')
+ result = (None, None)
+ resultscore = kw['min_score'] - 1
+ for file in files:
+ m = lib_pattern.search(file, 1)
+ if m:
+ libname = m.group(1)
+ libtags = libname.split('-')[1:]
+ currentscore = tags_score(libtags, kw)
+ if currentscore > resultscore:
+ result = (libname, file)
+ resultscore = currentscore
+ return result
+
+ lib_paths = getattr(Options.options, 'boostlibs', '')
+ if lib_paths:
+ lib_paths = [os.path.normpath(os.path.expandvars(os.path.expanduser(lib_paths)))]
+ else:
+ lib_paths = Utils.to_list(kw['libpath'])
+
+ v = kw.get('env', self.env)
+
+ (libname, file) = (None, None)
+ if kw['static'] in [STATIC_NOSTATIC, STATIC_BOTH]:
+ st_env_prefix = 'LIB'
+ files = libfiles(lib, v['shlib_PATTERN'], lib_paths)
+ (libname, file) = find_library_from_list(lib, files)
+ if libname is None and kw['static'] in [STATIC_ONLYSTATIC, STATIC_BOTH]:
+ st_env_prefix = 'STATICLIB'
+ staticLibPattern = v['staticlib_PATTERN']
+ if self.env['CC_NAME'] == 'msvc':
+ staticLibPattern = 'lib' + staticLibPattern
+ files = libfiles(lib, staticLibPattern, lib_paths)
+ (libname, file) = find_library_from_list(lib, files)
+ if libname is not None:
+ v['LIBPATH_BOOST_' + lib.upper()] = [os.path.split(file)[0]]
+ if self.env['CC_NAME'] == 'msvc' and os.path.splitext(file)[1] == '.lib':
+ v[st_env_prefix + '_BOOST_' + lib.upper()] = ['libboost_'+libname]
+ else:
+ v[st_env_prefix + '_BOOST_' + lib.upper()] = ['boost_'+libname]
+ return
+ self.fatal('lib boost_' + lib + ' not found!')
+
+@conf
+def check_boost(self, *k, **kw):
+ """
+ This should be the main entry point
+
+- min_version
+- max_version
+- version
+- include_path
+- lib_path
+- lib
+- toolsettag - None or a regexp
+- threadingtag - None or a regexp
+- abitag - None or a regexp
+- versiontag - WARNING: you should rather use version or min_version/max_version
+- static - look for static libs (values:
+ 'nostatic' or STATIC_NOSTATIC - ignore static libs (default)
+ 'both' or STATIC_BOTH - find static libs, too
+ 'onlystatic' or STATIC_ONLYSTATIC - find only static libs
+- score_version
+- score_abi
+- score_threading
+- score_toolset
+ * the scores are tuples (match_score, nomatch_score)
+ match_score is added to the score if the tag matches
+ nomatch_score is added when a tag is found and does not match
+- min_score
+ """
+
+ if not self.env['CXX']:
+ self.fatal('load a c++ compiler tool first, for example conf.check_tool("g++")')
+ self.validate_boost(kw)
+ ret = None
+ try:
+ if not kw.get('found_includes', None):
+ self.check_message_1(kw.get('msg_includes', 'boost headers'))
+ ret = self.find_boost_includes(kw)
+
+ except Configure.ConfigurationError, e:
+ if 'errmsg' in kw:
+ self.check_message_2(kw['errmsg'], 'YELLOW')
+ if 'mandatory' in kw:
+ if Logs.verbose > 1:
+ raise
+ else:
+ self.fatal('the configuration failed (see %r)' % self.log.name)
+ else:
+ if 'okmsg' in kw:
+ self.check_message_2(kw.get('okmsg_includes', ret))
+
+ for lib in kw['lib']:
+ self.check_message_1('library boost_'+lib)
+ try:
+ self.find_boost_library(lib, kw)
+ except Configure.ConfigurationError, e:
+ ret = False
+ if 'errmsg' in kw:
+ self.check_message_2(kw['errmsg'], 'YELLOW')
+ if 'mandatory' in kw:
+ if Logs.verbose > 1:
+ raise
+ else:
+ self.fatal('the configuration failed (see %r)' % self.log.name)
+ else:
+ if 'okmsg' in kw:
+ self.check_message_2(kw['okmsg'])
+
+ return ret
diff --git a/third_party/waf/wafadmin/3rdparty/build_file_tracker.py b/third_party/waf/wafadmin/3rdparty/build_file_tracker.py
new file mode 100644
index 0000000..5fc7358
--- /dev/null
+++ b/third_party/waf/wafadmin/3rdparty/build_file_tracker.py
@@ -0,0 +1,53 @@
+#! /usr/bin/env python
+# encoding: utf-8
+# Thomas Nagy, 2015
+
+"""
+Make tasks use file timestamps so that touching a build file forces a partial rebuild
+
+touch out/libfoo.a
+... rebuild what depends on libfoo.a
+
+to use::
+ def options(opt):
+ opt.tool_options('build_file_tracker')
+"""
+
+import os
+import Task, Utils
+
+def signature(self):
+ try: return self.cache_sig[0]
+ except AttributeError: pass
+
+ self.m = Utils.md5()
+
+ # explicit deps
+ exp_sig = self.sig_explicit_deps()
+
+ # env vars
+ var_sig = self.sig_vars()
+
+ # implicit deps
+ imp_sig = Task.SIG_NIL
+ if self.scan:
+ try:
+ imp_sig = self.sig_implicit_deps()
+ except ValueError:
+ return self.signature()
+
+ # timestamp dependency on build files only (source files are hashed)
+ buf = []
+ for k in self.inputs + getattr(self, 'dep_nodes', []) + self.generator.bld.node_deps.get(self.unique_id(), []):
+ if k.id & 3 == 3:
+ t = os.stat(k.abspath(self.env)).st_mtime
+ buf.append(t)
+ self.m.update(str(buf))
+
+ # we now have the signature (first element) and the details (for debugging)
+ ret = self.m.digest()
+ self.cache_sig = (ret, exp_sig, imp_sig, var_sig)
+ return ret
+
+Task.Task.signature_bak = Task.Task.signature # unused, kept just in case
+Task.Task.signature = signature # overridden
diff --git a/third_party/waf/wafadmin/3rdparty/fluid.py b/third_party/waf/wafadmin/3rdparty/fluid.py
new file mode 100644
index 0000000..c858fe3
--- /dev/null
+++ b/third_party/waf/wafadmin/3rdparty/fluid.py
@@ -0,0 +1,26 @@
+#!/usr/bin/python
+# encoding: utf-8
+# Grygoriy Fuchedzhy 2009
+
+"""
+Compile fluid files (fltk graphic library). Use the 'fluid' feature in conjunction with the 'cxx' feature.
+"""
+
+import Task
+from TaskGen import extension
+
+Task.simple_task_type('fluid', '${FLUID} -c -o ${TGT[0].abspath(env)} -h ${TGT[1].abspath(env)} ${SRC}', 'BLUE', shell=False, ext_out='.cxx')
+
+@extension('.fl')
+def fluid(self, node):
+ """add the .fl to the source list; the cxx file generated will be compiled when possible"""
+ cpp = node.change_ext('.cpp')
+ hpp = node.change_ext('.hpp')
+ self.create_task('fluid', node, [cpp, hpp])
+
+ if 'cxx' in self.features:
+ self.allnodes.append(cpp)
+
+def detect(conf):
+ fluid = conf.find_program('fluid', var='FLUID', mandatory=True)
+ conf.check_cfg(path='fltk-config', package='', args='--cxxflags --ldflags', uselib_store='FLTK', mandatory=True)
diff --git a/third_party/waf/wafadmin/3rdparty/gccdeps.py b/third_party/waf/wafadmin/3rdparty/gccdeps.py
new file mode 100644
index 0000000..28a889d
--- /dev/null
+++ b/third_party/waf/wafadmin/3rdparty/gccdeps.py
@@ -0,0 +1,127 @@
+#!/usr/bin/env python
+# encoding: utf-8
+# Thomas Nagy, 2008-2010 (ita)
+
+"""
+Execute the tasks with gcc -MD, read the dependencies from the .d file
+and prepare the dependency calculation for the next run
+"""
+
+import os, re, threading
+import Task, Logs, Utils, preproc
+from TaskGen import before, after, feature
+
+lock = threading.Lock()
+
+preprocessor_flag = '-MD'
+
+@feature('cc')
+@before('apply_core')
+def add_mmd_cc(self):
+ if self.env.get_flat('CCFLAGS').find(preprocessor_flag) < 0:
+ self.env.append_value('CCFLAGS', preprocessor_flag)
+
+@feature('cxx')
+@before('apply_core')
+def add_mmd_cxx(self):
+ if self.env.get_flat('CXXFLAGS').find(preprocessor_flag) < 0:
+ self.env.append_value('CXXFLAGS', preprocessor_flag)
+
+def scan(self):
+ "the scanner does not do anything initially"
+ nodes = self.generator.bld.node_deps.get(self.unique_id(), [])
+ names = []
+ return (nodes, names)
+
+re_o = re.compile("\.o$")
+re_src = re.compile("^(\.\.)[\\/](.*)$")
+
+def post_run(self):
+ # The following code is executed by threads; it is not thread-safe, so a lock is needed
+
+ if getattr(self, 'cached', None):
+ return Task.Task.post_run(self)
+
+ name = self.outputs[0].abspath(self.env)
+ name = re_o.sub('.d', name)
+ txt = Utils.readf(name)
+ #os.unlink(name)
+
+ txt = txt.replace('\\\n', '')
+
+ lst = txt.strip().split(':')
+ val = ":".join(lst[1:])
+ val = val.split()
+
+ nodes = []
+ bld = self.generator.bld
+
+ f = re.compile("^("+self.env.variant()+"|\.\.)[\\/](.*)$")
+ for x in val:
+ if os.path.isabs(x):
+
+ if not preproc.go_absolute:
+ continue
+
+ lock.acquire()
+ try:
+ node = bld.root.find_resource(x)
+ finally:
+ lock.release()
+ else:
+ g = re.search(re_src, x)
+ if g:
+ x = g.group(2)
+ lock.acquire()
+ try:
+ node = bld.bldnode.parent.find_resource(x)
+ finally:
+ lock.release()
+ else:
+ g = re.search(f, x)
+ if g:
+ x = g.group(2)
+ lock.acquire()
+ try:
+ node = bld.srcnode.find_resource(x)
+ finally:
+ lock.release()
+
+ if id(node) == id(self.inputs[0]):
+ # ignore the source file, it is already in the dependencies
+ # this way, successful config tests may be retrieved from the cache
+ continue
+
+ if not node:
+ raise ValueError('could not find %r for %r' % (x, self))
+ else:
+ nodes.append(node)
+
+ Logs.debug('deps: real scanner for %s returned %s' % (str(self), str(nodes)))
+
+ bld.node_deps[self.unique_id()] = nodes
+ bld.raw_deps[self.unique_id()] = []
+
+ try:
+ del self.cache_sig
+ except:
+ pass
+
+ Task.Task.post_run(self)
+
+import Constants, Utils
+def sig_implicit_deps(self):
+ try:
+ return Task.Task.sig_implicit_deps(self)
+ except Utils.WafError:
+ return Constants.SIG_NIL
+
+for name in 'cc cxx'.split():
+ try:
+ cls = Task.TaskBase.classes[name]
+ except KeyError:
+ pass
+ else:
+ cls.post_run = post_run
+ cls.scan = scan
+ cls.sig_implicit_deps = sig_implicit_deps
diff --git a/third_party/waf/wafadmin/3rdparty/go.py b/third_party/waf/wafadmin/3rdparty/go.py
new file mode 100644
index 0000000..f8397c7
--- /dev/null
+++ b/third_party/waf/wafadmin/3rdparty/go.py
@@ -0,0 +1,110 @@
+#!/usr/bin/env python
+# encoding: utf-8
+# go.py - Waf tool for the Go programming language
+# By: Tom Wambold <tom5760@gmail.com>
+
+import platform, os
+
+import Task
+import Utils
+from TaskGen import feature, extension, after
+
+Task.simple_task_type('gocompile', '${GOC} ${GOCFLAGS} -o ${TGT} ${SRC}', shell=False)
+Task.simple_task_type('gopack', '${GOP} grc ${TGT} ${SRC}', shell=False)
+Task.simple_task_type('golink', '${GOL} ${GOLFLAGS} -o ${TGT} ${SRC}', shell=False)
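+# Illustrative wscript usage (the target/source names are examples, not part
+# of this tool):
+#   def build(bld):
+#       bld.new_task_gen(features='go goprogram', source='main.go', target='app')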
+
+def detect(conf):
+
+ def set_def(var, val):
+ if not conf.env[var]:
+ conf.env[var] = val
+
+ goarch = os.getenv("GOARCH")
+
+ if goarch == '386':
+ set_def('GO_PLATFORM', 'i386')
+ elif goarch == 'amd64':
+ set_def('GO_PLATFORM', 'x86_64')
+ elif goarch == 'arm':
+ set_def('GO_PLATFORM', 'arm')
+ else:
+ set_def('GO_PLATFORM', platform.machine())
+
+ if conf.env.GO_PLATFORM == 'x86_64':
+ set_def('GO_COMPILER', '6g')
+ set_def('GO_LINKER', '6l')
+ set_def('GO_EXTENSION', '.6')
+ elif conf.env.GO_PLATFORM in ['i386', 'i486', 'i586', 'i686']:
+ set_def('GO_COMPILER', '8g')
+ set_def('GO_LINKER', '8l')
+ set_def('GO_EXTENSION', '.8')
+ elif conf.env.GO_PLATFORM == 'arm':
+ set_def('GO_COMPILER', '5g')
+ set_def('GO_LINKER', '5l')
+ set_def('GO_EXTENSION', '.5')
+
+ if not (conf.env.GO_COMPILER or conf.env.GO_LINKER or conf.env.GO_EXTENSION):
+ raise conf.fatal('Unsupported platform ' + platform.machine())
+
+ set_def('GO_PACK', 'gopack')
+ set_def('GO_PACK_EXTENSION', '.a')
+
+ conf.find_program(conf.env.GO_COMPILER, var='GOC', mandatory=True)
+ conf.find_program(conf.env.GO_LINKER, var='GOL', mandatory=True)
+ conf.find_program(conf.env.GO_PACK, var='GOP', mandatory=True)
+ conf.find_program('cgo', var='CGO', mandatory=True)
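+# Note: 6g/8g/5g and 6l/8l/5l above are the pre-Go-1 gc toolchain compiler
+# and linker names for amd64/386/arm; later Go releases replaced them with
+# the unified "go tool" commands.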
+
+@extension('.go')
+def compile_go(self, node):
+ try:
+ self.go_nodes.append(node)
+ except AttributeError:
+ self.go_nodes = [node]
+
+@feature('go')
+@after('apply_core')
+def apply_compile_go(self):
+ try:
+ nodes = self.go_nodes
+ except AttributeError:
+ self.go_compile_task = None
+ else:
+ self.go_compile_task = self.create_task('gocompile',
+ nodes,
+ [self.path.find_or_declare(self.target + self.env.GO_EXTENSION)])
+
+@feature('gopackage', 'goprogram')
+@after('apply_compile_go')
+def apply_goinc(self):
+ if not getattr(self, 'go_compile_task', None):
+ return
+
+ names = self.to_list(getattr(self, 'uselib_local', []))
+ for name in names:
+ obj = self.name_to_obj(name)
+ if not obj:
+ raise Utils.WafError('object %r was not found in uselib_local '
+ '(required by %r)' % (name, self.name))
+ obj.post()
+ self.go_compile_task.set_run_after(obj.go_package_task)
+ self.go_compile_task.dep_nodes.extend(obj.go_package_task.outputs)
+ self.env.append_unique('GOCFLAGS', '-I' + obj.path.abspath(obj.env))
+ self.env.append_unique('GOLFLAGS', '-L' + obj.path.abspath(obj.env))
+
+@feature('gopackage')
+@after('apply_goinc')
+def apply_gopackage(self):
+ self.go_package_task = self.create_task('gopack',
+ self.go_compile_task.outputs[0],
+ self.path.find_or_declare(self.target + self.env.GO_PACK_EXTENSION))
+ self.go_package_task.set_run_after(self.go_compile_task)
+ self.go_package_task.dep_nodes.extend(self.go_compile_task.outputs)
+
+@feature('goprogram')
+@after('apply_goinc')
+def apply_golink(self):
+ self.go_link_task = self.create_task('golink',
+ self.go_compile_task.outputs[0],
+ self.path.find_or_declare(self.target))
+ self.go_link_task.set_run_after(self.go_compile_task)
+ self.go_link_task.dep_nodes.extend(self.go_compile_task.outputs)
diff --git a/third_party/waf/wafadmin/3rdparty/lru_cache.py b/third_party/waf/wafadmin/3rdparty/lru_cache.py
new file mode 100644
index 0000000..96f0e6c
--- /dev/null
+++ b/third_party/waf/wafadmin/3rdparty/lru_cache.py
@@ -0,0 +1,96 @@
+#! /usr/bin/env python
+# encoding: utf-8
+# Thomas Nagy 2011
+
+import os, shutil, re
+import Options, Build, Logs
+
+"""
+Apply a least recently used policy to the Waf cache.
+
+For performance reasons, it is called after the build is complete.
+
+We assume that the folders are written atomically
+
+Export WAFCACHE=/tmp/foo-xyz, where xyz is the cache size limit in bytes
+(the trailing digits of the folder name are parsed below); if no size is
+given, the default limit of 10GB applies
+"""
+
+re_num = re.compile('[a-zA-Z_]+(\d+)')
+
+CACHESIZE = 10*1024*1024*1024 # in bytes
+CLEANRATIO = 0.8
+DIRSIZE = 4096
+
+def compile(self):
+ if Options.cache_global and not Options.options.nocache:
+ try:
+ os.makedirs(Options.cache_global)
+ except:
+ pass
+
+ try:
+ self.raw_compile()
+ finally:
+ if Options.cache_global and not Options.options.nocache:
+ self.sweep()
+
+def sweep(self):
+ global CACHESIZE
+ CACHEDIR = Options.cache_global
+
+ # get the cache max size from the WAFCACHE filename
+ re_num = re.compile('[a-zA-Z_]+(\d+)')
+ val = re_num.sub('\\1', os.path.basename(Options.cache_global))
+ try:
+ CACHESIZE = int(val)
+ except:
+ pass
+
+ # map folder names to timestamps
+ flist = {}
+ for x in os.listdir(CACHEDIR):
+ j = os.path.join(CACHEDIR, x)
+ if os.path.isdir(j) and len(x) == 32: # dir names are md5 hexdigests
+ flist[x] = [os.stat(j).st_mtime, 0]
+
+ for (x, v) in flist.items():
+ cnt = DIRSIZE # each entry takes 4kB
+ d = os.path.join(CACHEDIR, x)
+ for k in os.listdir(d):
+ cnt += os.stat(os.path.join(d, k)).st_size
+ flist[x][1] = cnt
+
+ total = sum([x[1] for x in flist.values()])
+ Logs.debug('lru: Cache size is %r' % total)
+
+ if total >= CACHESIZE:
+ Logs.debug('lru: Trimming the cache since %r > %r' % (total, CACHESIZE))
+
+ # make a list to sort the folders by timestamp
+ lst = [(p, v[0], v[1]) for (p, v) in flist.items()]
+ lst.sort(key=lambda x: x[1]) # sort by timestamp
+ lst.reverse()
+
+ while total >= CACHESIZE * CLEANRATIO:
+ (k, t, s) = lst.pop()
+ p = os.path.join(CACHEDIR, k)
+ v = p + '.del'
+ try:
+ os.rename(p, v)
+ except:
+ # someone already did it
+ pass
+ else:
+ try:
+ shutil.rmtree(v)
+ except:
+ # this should not happen, but who knows?
+ Logs.warn('If you ever see this message, report it (%r)' % v)
+ total -= s
+ del flist[k]
+ Logs.debug('lru: Total at the end %r' % total)
+
+Build.BuildContext.raw_compile = Build.BuildContext.compile
+Build.BuildContext.compile = compile
+Build.BuildContext.sweep = sweep
diff --git a/third_party/waf/wafadmin/3rdparty/paranoid.py b/third_party/waf/wafadmin/3rdparty/paranoid.py
new file mode 100644
index 0000000..ba6d752
--- /dev/null
+++ b/third_party/waf/wafadmin/3rdparty/paranoid.py
@@ -0,0 +1,34 @@
+#!/usr/bin/env python
+# encoding: utf-8
+# ita 2010
+
+import Logs, Utils, Build, Task
+
+def say(txt):
+ Logs.warn("^o^: %s" % txt)
+
+try:
+ ret = Utils.cmd_output('which cowsay 2> /dev/null').strip()
+except Exception, e:
+ pass
+else:
+ def say(txt):
+ f = Utils.cmd_output([ret, txt])
+ Utils.pprint('PINK', f)
+
+say('you make the errors, we detect them')
+
+def check_task_classes(self):
+ for x in Task.TaskBase.classes:
+ cls = Task.TaskBase.classes[x]
+ if not (getattr(cls, 'ext_in', None) or getattr(cls, 'before', None)):
+ say('class %s has no precedence constraints (ext_in/before)' % x)
+ if not (getattr(cls, 'ext_out', None) or getattr(cls, 'after', None)):
+ say('class %s has no precedence constraints (ext_out/after)' % x)
+
+comp = Build.BuildContext.compile
+def compile(self):
+ if not getattr(self, 'magic', None):
+ check_task_classes(self)
+ return comp(self)
+Build.BuildContext.compile = compile
diff --git a/third_party/waf/wafadmin/3rdparty/prefork.py b/third_party/waf/wafadmin/3rdparty/prefork.py
new file mode 100755
index 0000000..88fb4e4
--- /dev/null
+++ b/third_party/waf/wafadmin/3rdparty/prefork.py
@@ -0,0 +1,275 @@
+#! /usr/bin/env python
+# encoding: utf-8
+# Thomas Nagy, 2015 (ita)
+#
+# prefer the waf 1.8 version
+
+"""
+The full samba build can be faster by ~10%, but there are a few limitations:
+* only one build process should be run at a time as the servers would use the same ports
+* only one build command is going to be called ("waf build configure build" would not work)
+
+def build(bld):
+
+ mod = Utils.load_tool('prefork')
+ mod.build(bld)
+ ...
+ (build declarations after)
+"""
+
+import os, re, socket, threading, sys, subprocess, time, atexit, traceback
+try:
+ import SocketServer
+except ImportError:
+ import socketserver as SocketServer
+try:
+ from queue import Queue
+except ImportError:
+ from Queue import Queue
+try:
+ import cPickle
+except ImportError:
+ import pickle as cPickle
+
+DEFAULT_PORT = 51200
+
+HEADER_SIZE = 128
+
+REQ = 'REQ'
+RES = 'RES'
+BYE = 'BYE'
+
+def make_header(params):
+ header = ','.join(params)
+ if sys.hexversion > 0x3000000:
+ header = header.encode('iso8859-1')
+ header = header.ljust(HEADER_SIZE)
+ assert(len(header) == HEADER_SIZE)
+ return header
+
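+# e.g. make_header([REQ, '42']) yields 'REQ,42' right-padded with spaces to
+# exactly HEADER_SIZE (128) bytes, so each peer can read fixed-size blocks
+# before decoding the variable-length pickled payload that may follow.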
+
+re_valid_query = re.compile('^[a-zA-Z0-9_, ]+$')
+class req(SocketServer.StreamRequestHandler):
+ def handle(self):
+ while 1:
+ try:
+ self.process_command()
+ except Exception as e:
+ print(e)
+ break
+
+ def process_command(self):
+ query = self.rfile.read(HEADER_SIZE)
+ if not query:
+ return
+ #print(len(query))
+ assert(len(query) == HEADER_SIZE)
+ if sys.hexversion > 0x3000000:
+ query = query.decode('iso8859-1')
+ #print "%r" % query
+ if not re_valid_query.match(query):
+ raise ValueError('Invalid query %r' % query)
+
+ query = query.strip().split(',')
+
+ if query[0] == REQ:
+ self.run_command(query[1:])
+ elif query[0] == BYE:
+ raise ValueError('Exit')
+ else:
+ raise ValueError('Invalid query %r' % query)
+
+ def run_command(self, query):
+
+ size = int(query[0])
+ data = self.rfile.read(size)
+ assert(len(data) == size)
+ kw = cPickle.loads(data)
+
+ # run command
+ ret = out = err = exc = None
+ cmd = kw['cmd']
+ del kw['cmd']
+ #print(cmd)
+
+ try:
+ if kw['stdout'] or kw['stderr']:
+ p = subprocess.Popen(cmd, **kw)
+ (out, err) = p.communicate()
+ ret = p.returncode
+ else:
+ ret = subprocess.Popen(cmd, **kw).wait()
+ except Exception as e:
+ ret = -1
+ exc = str(e) + traceback.format_exc()
+
+ # write the results
+ if out or err or exc:
+ data = (out, err, exc)
+ data = cPickle.dumps(data, -1)
+ else:
+ data = ''
+
+ params = [RES, str(ret), str(len(data))]
+
+ self.wfile.write(make_header(params))
+
+ if data:
+ self.wfile.write(data)
+
+def create_server(conn, cls):
+ #SocketServer.ThreadingTCPServer.allow_reuse_address = True
+ #server = SocketServer.ThreadingTCPServer(conn, req)
+
+ SocketServer.TCPServer.allow_reuse_address = True
+ server = SocketServer.TCPServer(conn, req)
+ #server.timeout = 6000 # seconds
+ server.serve_forever(poll_interval=0.001)
+
+if __name__ == '__main__':
+ if len(sys.argv) > 1:
+ port = int(sys.argv[1])
+ else:
+ port = DEFAULT_PORT
+ #conn = (socket.gethostname(), port)
+ conn = ("127.0.0.1", port)
+ #print("listening - %r %r\n" % conn)
+ create_server(conn, req)
+else:
+
+ import Runner, Utils
+
+ def init_task_pool(self):
+ # lazy creation, and set a common pool for all task consumers
+ pool = self.pool = []
+ for i in range(self.numjobs):
+ consumer = Runner.get_pool()
+ pool.append(consumer)
+ consumer.idx = i
+ self.ready = Queue(0)
+ def setq(consumer):
+ consumer.ready = self.ready
+ try:
+ threading.current_thread().idx = consumer.idx
+ except Exception as e:
+ print(e)
+ for x in pool:
+ x.ready.put(setq)
+ return pool
+ Runner.Parallel.init_task_pool = init_task_pool
+
+ PORT = 51200
+
+ def make_server(idx):
+ port = PORT + idx
+ cmd = [sys.executable, os.path.abspath(__file__), str(port)]
+ proc = subprocess.Popen(cmd)
+ proc.port = port
+ return proc
+
+ def make_conn(srv):
+ #port = PORT + idx
+ port = srv.port
+ conn = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
+ conn.connect(('127.0.0.1', port))
+ return conn
+
+ SERVERS = []
+ CONNS = []
+ def close_all():
+ while CONNS:
+ conn = CONNS.pop()
+ try:
+ conn.close()
+ except:
+ pass
+ while SERVERS:
+ srv = SERVERS.pop()
+ try:
+ srv.kill()
+ except:
+ pass
+ atexit.register(close_all)
+
+ def put_data(conn, data):
+ conn.send(data)
+
+ def read_data(conn, siz):
+ ret = conn.recv(siz)
+ if not ret:
+ print("closed connection?")
+
+ assert(len(ret) == siz)
+ return ret
+
+ def exec_command(cmd, **kw):
+ if 'log' in kw:
+ log = kw['log']
+ kw['stdout'] = kw['stderr'] = subprocess.PIPE
+ del(kw['log'])
+ else:
+ kw['stdout'] = kw['stderr'] = None
+ kw['shell'] = isinstance(cmd, str)
+
+ idx = threading.current_thread().idx
+ kw['cmd'] = cmd
+
+ data = cPickle.dumps(kw, -1)
+ params = [REQ, str(len(data))]
+ header = make_header(params)
+
+ conn = CONNS[idx]
+
+ put_data(conn, header)
+ put_data(conn, data)
+
+ data = read_data(conn, HEADER_SIZE)
+ if sys.hexversion > 0x3000000:
+ data = data.decode('iso8859-1')
+
+ lst = data.split(',')
+ ret = int(lst[1])
+ dlen = int(lst[2])
+
+ out = err = None
+ if dlen:
+ data = read_data(conn, dlen)
+ (out, err, exc) = cPickle.loads(data)
+ if exc:
+ raise Utils.WafError('Execution failure: %s' % exc)
+
+ if out:
+ log.write(out)
+ if err:
+ log.write(err)
+
+ return ret
+
+ def __init__(self):
+ threading.Thread.__init__(self)
+
+ # identifier of the current thread
+ self.idx = len(SERVERS)
+
+ # create a server and wait for the connection
+ srv = make_server(self.idx)
+ SERVERS.append(srv)
+
+ conn = None
+ for x in range(30):
+ try:
+ conn = make_conn(srv)
+ break
+ except socket.error:
+ time.sleep(0.01)
+ if not conn:
+ raise ValueError('Could not start the server!')
+ CONNS.append(conn)
+
+ self.setDaemon(1)
+ self.start()
+ Runner.TaskConsumer.__init__ = __init__
+
+ def build(bld):
+ # dangerous, there is no other command hopefully
+ Utils.exec_command = exec_command
diff --git a/third_party/waf/wafadmin/3rdparty/swig.py b/third_party/waf/wafadmin/3rdparty/swig.py
new file mode 100644
index 0000000..393e8e1
--- /dev/null
+++ b/third_party/waf/wafadmin/3rdparty/swig.py
@@ -0,0 +1,189 @@
+#! /usr/bin/env python
+# encoding: UTF-8
+# Petar Forai
+# Thomas Nagy 2008
+
+import re
+import Task, Utils, Logs
+from TaskGen import extension
+from Configure import conf
+import preproc
+
+"""
+Welcome to the hell of adding tasks dynamically
+
+swig interface files may be created at runtime, and the module name may be
+unknown in advance
+
+rev 5859 is much simpler
+"""
+
+SWIG_EXTS = ['.swig', '.i']
+
+swig_str = '${SWIG} ${SWIGFLAGS} ${_CCINCFLAGS} ${_CXXINCFLAGS} ${_CCDEFFLAGS} ${_CXXDEFFLAGS} ${SRC}'
+cls = Task.simple_task_type('swig', swig_str, color='BLUE', ext_in='.i .h', ext_out='.o .c .cxx', shell=False)
+
+def runnable_status(self):
+ for t in self.run_after:
+ if not t.hasrun:
+ return Task.ASK_LATER
+
+ if not getattr(self, 'init_outputs', None):
+ self.init_outputs = True
+ if not getattr(self, 'module', None):
+ # search the module name
+ txt = self.inputs[0].read(self.env)
+ m = re_module.search(txt)
+ if not m:
+ raise ValueError("could not find the swig module name")
+ self.module = m.group(1)
+
+ swig_c(self)
+
+ # add the language-specific output files as nodes
+ # call funs in the dict swig_langs
+ for x in self.env['SWIGFLAGS']:
+ # obtain the language
+ x = x[1:]
+ try:
+ fun = swig_langs[x]
+ except KeyError:
+ pass
+ else:
+ fun(self)
+
+ return Task.Task.runnable_status(self)
+setattr(cls, 'runnable_status', runnable_status)
+
+re_module = re.compile('%module(?:\s*\(.*\))?\s+(.+)', re.M)
+
+re_1 = re.compile(r'^%module.*?\s+([\w]+)\s*?$', re.M)
+re_2 = re.compile('%include "(.*)"', re.M)
+re_3 = re.compile('#include "(.*)"', re.M)
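+# re_module matches declarations such as '%module foo' or
+# '%module(directors="1") foo'; re_2/re_3 collect the %include/#include
+# targets that the dependency scanner below climbs through.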
+
+def scan(self):
+ "scan for swig dependencies, climb the .i files"
+ env = self.env
+
+ lst_src = []
+
+ seen = []
+ to_see = [self.inputs[0]]
+
+ while to_see:
+ node = to_see.pop(0)
+ if node.id in seen:
+ continue
+ seen.append(node.id)
+ lst_src.append(node)
+
+ # read the file
+ code = node.read(env)
+ code = preproc.re_nl.sub('', code)
+ code = preproc.re_cpp.sub(preproc.repl, code)
+
+ # find .i files and project headers
+ names = re_2.findall(code) + re_3.findall(code)
+ for n in names:
+ for d in self.generator.env.INC_PATHS + [node.parent]:
+ u = d.find_resource(n)
+ if u:
+ to_see.append(u)
+ break
+ else:
+ Logs.warn('could not find %r' % n)
+
+ # list of nodes this one depends on, and module name if present
+ if Logs.verbose:
+ Logs.debug('deps: deps for %s: %s' % (str(self), str(lst_src)))
+ return (lst_src, [])
+cls.scan = scan
+
+# provide additional language processing
+swig_langs = {}
+def swig(fun):
+ swig_langs[fun.__name__.replace('swig_', '')] = fun
+
+def swig_c(self):
+ ext = '.swigwrap_%d.c' % self.generator.idx
+ flags = self.env['SWIGFLAGS']
+ if '-c++' in flags:
+ ext += 'xx'
+ out_node = self.inputs[0].parent.find_or_declare(self.module + ext)
+
+ try:
+ if '-c++' in flags:
+ fun = self.generator.cxx_hook
+ else:
+ fun = self.generator.c_hook
+ except AttributeError:
+ raise Utils.WafError('No c%s compiler was found to process swig files' % ('-c++' in flags and '++' or ''))
+
+ task = fun(out_node)
+ task.set_run_after(self)
+
+ ge = self.generator.bld.generator
+ ge.outstanding.insert(0, task)
+ ge.total += 1
+
+ try:
+ ltask = self.generator.link_task
+ except AttributeError:
+ pass
+ else:
+ ltask.inputs.append(task.outputs[0])
+
+ self.outputs.append(out_node)
+
+ if not '-o' in self.env['SWIGFLAGS']:
+ self.env.append_value('SWIGFLAGS', '-o')
+ self.env.append_value('SWIGFLAGS', self.outputs[0].abspath(self.env))
+
+@swig
+def swig_python(tsk):
+ tsk.set_outputs(tsk.inputs[0].parent.find_or_declare(tsk.module + '.py'))
+
+@swig
+def swig_ocaml(tsk):
+ tsk.set_outputs(tsk.inputs[0].parent.find_or_declare(tsk.module + '.ml'))
+ tsk.set_outputs(tsk.inputs[0].parent.find_or_declare(tsk.module + '.mli'))
+
+@extension(SWIG_EXTS)
+def i_file(self, node):
+ # the task instance
+ tsk = self.create_task('swig')
+ tsk.set_inputs(node)
+ tsk.module = getattr(self, 'swig_module', None)
+
+ flags = self.to_list(getattr(self, 'swig_flags', []))
+ self.env.append_value('SWIGFLAGS', flags)
+
+ if not '-outdir' in flags:
+ flags.append('-outdir')
+ flags.append(node.parent.abspath(self.env))
+
+@conf
+def check_swig_version(conf, minver=None):
+ """Check for a minimum swig version like conf.check_swig_version('1.3.28')
+ or conf.check_swig_version((1,3,28)) """
+ reg_swig = re.compile(r'SWIG Version\s(.*)', re.M)
+
+ swig_out = Utils.cmd_output('%s -version' % conf.env['SWIG'])
+
+ swigver = [int(s) for s in reg_swig.findall(swig_out)[0].split('.')]
+ if isinstance(minver, basestring):
+ minver = [int(s) for s in minver.split(".")]
+ if isinstance(minver, tuple):
+ minver = [int(s) for s in minver]
+ result = (minver is None) or (minver[:3] <= swigver[:3])
+ swigver_full = '.'.join(map(str, swigver))
+ if result:
+ conf.env['SWIG_VERSION'] = swigver_full
+ if minver is None:
+ conf.check_message_custom('swig version', '', swigver_full)
+ else:
+ minver_str = '.'.join(map(str, minver))
+ conf.check_message('swig version', '>= %s' % (minver_str,), result, option=swigver_full)
+ return result
+
+def detect(conf):
+ swig = conf.find_program('swig', var='SWIG', mandatory=True)
diff --git a/third_party/waf/wafadmin/3rdparty/valadoc.py b/third_party/waf/wafadmin/3rdparty/valadoc.py
new file mode 100644
index 0000000..bdb0c6b
--- /dev/null
+++ b/third_party/waf/wafadmin/3rdparty/valadoc.py
@@ -0,0 +1,112 @@
+#! /usr/bin/env python
+# encoding: UTF-8
+# Nicolas Joseph 2009
+
+from fnmatch import fnmatchcase
+import os, os.path, re, stat
+import Task, Utils, Node, Constants
+from TaskGen import feature, extension, after
+from Logs import debug, warn, error
+
+VALADOC_STR = '${VALADOC}'
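+# Illustrative task generator (the attribute names are the ones that
+# process_valadoc() below reads; the values are examples):
+#   bld(features='valadoc', output_dir='apidocs', doclet='html',
+#       package_name='mylib', files=file_nodes)  # file_nodes: list of nodes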
+
+class valadoc_task(Task.Task):
+
+ vars = ['VALADOC', 'VALADOCFLAGS']
+ color = 'BLUE'
+ after = 'cxx_link cc_link'
+ quiet = True
+
+ output_dir = ''
+ doclet = ''
+ package_name = ''
+ package_version = ''
+ files = []
+ protected = True
+ private = False
+ inherit = False
+ deps = False
+ enable_non_null_experimental = False
+ force = False
+
+ def runnable_status(self):
+ return True
+
+ def run(self):
+ if self.env['VALADOC']:
+ if not self.env['VALADOCFLAGS']:
+ self.env['VALADOCFLAGS'] = ''
+ cmd = [Utils.subst_vars(VALADOC_STR, self.env)]
+ cmd.append ('-o %s' % self.output_dir)
+ if getattr(self, 'doclet', None):
+ cmd.append ('--doclet %s' % self.doclet)
+ cmd.append ('--package-name %s' % self.package_name)
+ if getattr(self, 'package_version', None):
+ cmd.append ('--package-version %s' % self.package_version)
+ if getattr(self, 'packages', None):
+ for package in self.packages:
+ cmd.append ('--pkg %s' % package)
+ if getattr(self, 'vapi_dirs', None):
+ for vapi_dir in self.vapi_dirs:
+ cmd.append ('--vapidir %s' % vapi_dir)
+ if not getattr(self, 'protected', None):
+ cmd.append ('--no-protected')
+ if getattr(self, 'private', None):
+ cmd.append ('--private')
+ if getattr(self, 'inherit', None):
+ cmd.append ('--inherit')
+ if getattr(self, 'deps', None):
+ cmd.append ('--deps')
+ if getattr(self, 'enable_non_null_experimental', None):
+ cmd.append ('--enable-non-null-experimental')
+ if getattr(self, 'force', None):
+ cmd.append ('--force')
+ cmd.append (' '.join ([x.relpath_gen (self.generator.bld.bldnode) for x in self.files]))
+ return self.generator.bld.exec_command(' '.join(cmd))
+ else:
+ error ('You must install valadoc <http://live.gnome.org/Valadoc> to generate the API documentation')
+ return -1
+
+@feature('valadoc')
+def process_valadoc(self):
+ task = getattr(self, 'task', None)
+ if not task:
+ task = self.create_task('valadoc')
+ self.task = task
+ if getattr(self, 'output_dir', None):
+ task.output_dir = self.output_dir
+ else:
+ raise Utils.WafError('no output directory')
+ if getattr(self, 'doclet', None):
+ task.doclet = self.doclet
+ else:
+ raise Utils.WafError('no doclet directory')
+ if getattr(self, 'package_name', None):
+ task.package_name = self.package_name
+ else:
+ raise Utils.WafError('no package name')
+ if getattr(self, 'package_version', None):
+ task.package_version = self.package_version
+ if getattr(self, 'packages', None):
+ task.packages = Utils.to_list(self.packages)
+ if getattr(self, 'vapi_dirs', None):
+ task.vapi_dirs = Utils.to_list(self.vapi_dirs)
+ if getattr(self, 'files', None):
+ task.files = self.files
+ else:
+ raise Utils.WafError('no input file')
+ if getattr(self, 'protected', None):
+ task.protected = self.protected
+ if getattr(self, 'private', None):
+ task.private = self.private
+ if getattr(self, 'inherit', None):
+ task.inherit = self.inherit
+ if getattr(self, 'deps', None):
+ task.deps = self.deps
+ if getattr(self, 'enable_non_null_experimental', None):
+ task.enable_non_null_experimental = self.enable_non_null_experimental
+ if getattr(self, 'force', None):
+ task.force = self.force
+
+def detect(conf):
+ conf.find_program('valadoc', var='VALADOC', mandatory=False)
diff --git a/third_party/waf/wafadmin/Build.py b/third_party/waf/wafadmin/Build.py
new file mode 100644
index 0000000..50f4d7f
--- /dev/null
+++ b/third_party/waf/wafadmin/Build.py
@@ -0,0 +1,1032 @@
+#!/usr/bin/env python
+# encoding: utf-8
+# Thomas Nagy, 2005 (ita)
+
+"""
+Dependency tree holder
+
+The class Build holds all the info related to a build:
+* file system representation (tree of Node instances)
+* various cached objects (task signatures, file scan results, ..)
+
+There is only one Build object at a time (bld singleton)
+"""
+
+import os, sys, errno, re, glob, gc, datetime, shutil
+try: import cPickle
+except: import pickle as cPickle
+import Runner, TaskGen, Node, Scripting, Utils, Environment, Task, Logs, Options
+from Logs import debug, error, info
+from Constants import *
+
+SAVED_ATTRS = 'root srcnode bldnode node_sigs node_deps raw_deps task_sigs id_nodes'.split()
+"Build class members to save"
+
+bld = None
+"singleton - safe to use when Waf is not used as a library"
+
+class BuildError(Utils.WafError):
+ def __init__(self, b=None, t=[]):
+ self.bld = b
+ self.tasks = t
+ self.ret = 1
+ Utils.WafError.__init__(self, self.format_error())
+
+ def format_error(self):
+ lst = ['Build failed:']
+ for tsk in self.tasks:
+ txt = tsk.format_error()
+ if txt: lst.append(txt)
+ sep = ' '
+ if len(lst) > 2:
+ sep = '\n'
+ return sep.join(lst)
+
+def group_method(fun):
+ """
+ sets a build context method to execute after the current group has
+ finished executing; this is useful for installing build files:
+ * calling install_files/install_as will fail if called too early
+ * people do not want to define an install method in their task classes
+
+ TODO: try it
+ """
+ def f(*k, **kw):
+ if not k[0].is_install:
+ return False
+
+ postpone = True
+ if 'postpone' in kw:
+ postpone = kw['postpone']
+ del kw['postpone']
+
+ # TODO waf 1.6 in theory there should be no reference to the TaskManager internals here
+ if postpone:
+ m = k[0].task_manager
+ if not m.groups: m.add_group()
+ m.groups[m.current_group].post_funs.append((fun, k, kw))
+ if not 'cwd' in kw:
+ kw['cwd'] = k[0].path
+ else:
+ fun(*k, **kw)
+ return f
+
+class BuildContext(Utils.Context):
+ "holds the dependency tree"
+ def __init__(self):
+
+ # not a singleton, but provided for compatibility
+ global bld
+ bld = self
+
+ self.task_manager = Task.TaskManager()
+
+ # instead of hashing the nodes, we assign them a unique id when they are created
+ self.id_nodes = 0
+ self.idx = {}
+
+ # map names to environments, the 'default' must be defined
+ self.all_envs = {}
+
+ # ======================================= #
+ # code for reading the scripts
+
+ # project build directory - do not reset() from load_dirs()
+ self.bdir = ''
+
+ # the current directory from which the code is run
+ # the folder changes every time a wscript is read
+ self.path = None
+
+ # Manual dependencies.
+ self.deps_man = Utils.DefaultDict(list)
+
+ # ======================================= #
+ # cache variables
+
+ # local cache for absolute paths - cache_node_abspath[variant][node]
+ self.cache_node_abspath = {}
+
+ # list of folders that are already scanned
+ # so that we do not need to stat them one more time
+ self.cache_scanned_folders = {}
+
+ # list of targets to uninstall for removing the empty folders after uninstalling
+ self.uninstall = []
+
+ # ======================================= #
+ # tasks and objects
+
+ # build dir variants (release, debug, ..)
+ for v in 'cache_node_abspath task_sigs node_deps raw_deps node_sigs'.split():
+ var = {}
+ setattr(self, v, var)
+
+ self.cache_dir_contents = {}
+
+ self.all_task_gen = []
+ self.task_gen_cache_names = {}
+ self.cache_sig_vars = {}
+ self.log = None
+
+ self.root = None
+ self.srcnode = None
+ self.bldnode = None
+
+ # bind the build context to the nodes in use
+ # this means better encapsulation and no build context singleton
+ class node_class(Node.Node):
+ pass
+ self.node_class = node_class
+ self.node_class.__module__ = "Node"
+ self.node_class.__name__ = "Nodu"
+ self.node_class.bld = self
+
+ self.is_install = None
+
+ def __copy__(self):
+ "nodes are not supposed to be copied"
+ raise Utils.WafError('build contexts are not supposed to be cloned')
+
+ def load(self):
+ "load the cache from the disk"
+ try:
+ env = Environment.Environment(os.path.join(self.cachedir, 'build.config.py'))
+ except (IOError, OSError):
+ pass
+ else:
+ if env['version'] < HEXVERSION:
+ raise Utils.WafError('Version mismatch! reconfigure the project')
+ for t in env['tools']:
+ self.setup(**t)
+
+ try:
+ gc.disable()
+ f = data = None
+
+ Node.Nodu = self.node_class
+
+ try:
+ f = open(os.path.join(self.bdir, DBFILE), 'rb')
+ except (IOError, EOFError):
+ # handle missing file/empty file
+ pass
+
+ try:
+ if f: data = cPickle.load(f)
+ except AttributeError:
+ # handle file of an old Waf version
+ # that has an attribute which no longer exist
+ # (e.g. AttributeError: 'module' object has no attribute 'BuildDTO')
+ if Logs.verbose > 1: raise
+
+ if data:
+ for x in SAVED_ATTRS: setattr(self, x, data[x])
+ else:
+ debug('build: Build cache loading failed')
+
+ finally:
+ if f: f.close()
+ gc.enable()
+
+ def save(self):
+ "store the cache on disk, see self.load"
+ gc.disable()
+ self.root.__class__.bld = None
+
+ # some people are very nervous with ctrl+c so we have to make a temporary file
+ Node.Nodu = self.node_class
+ db = os.path.join(self.bdir, DBFILE)
+ file = open(db + '.tmp', 'wb')
+ data = {}
+ for x in SAVED_ATTRS: data[x] = getattr(self, x)
+ cPickle.dump(data, file, -1)
+ file.close()
+
+ # do not use shutil.move
+ try: os.unlink(db)
+ except OSError: pass
+ os.rename(db + '.tmp', db)
+ self.root.__class__.bld = self
+ gc.enable()
+
+ # ======================================= #
+
+ def clean(self):
+ debug('build: clean called')
+
+ # does not clean files created during the configuration
+ precious = set([])
+ for env in self.all_envs.values():
+ for x in env[CFG_FILES]:
+ node = self.srcnode.find_resource(x)
+ if node:
+ precious.add(node.id)
+
+ def clean_rec(node):
+ for x in list(node.childs.keys()):
+ nd = node.childs[x]
+
+ tp = nd.id & 3
+ if tp == Node.DIR:
+ clean_rec(nd)
+ elif tp == Node.BUILD:
+ if nd.id in precious: continue
+ for env in self.all_envs.values():
+ try: os.remove(nd.abspath(env))
+ except OSError: pass
+ node.childs.__delitem__(x)
+
+ clean_rec(self.srcnode)
+
+ for v in 'node_sigs node_deps task_sigs raw_deps cache_node_abspath'.split():
+ setattr(self, v, {})
+
+ def compile(self):
+ """The cache file is not written if nothing was build at all (build is up to date)"""
+ debug('build: compile called')
+
+ """
+ import cProfile, pstats
+ cProfile.run("import Build\nBuild.bld.flush()", 'profi.txt')
+ p = pstats.Stats('profi.txt')
+ p.sort_stats('cumulative').print_stats(80)
+ """
+ self.flush()
+ #"""
+
+ self.generator = Runner.Parallel(self, Options.options.jobs)
+
+ def dw(on=True):
+ if Options.options.progress_bar:
+ if on: sys.stderr.write(Logs.colors.cursor_on)
+ else: sys.stderr.write(Logs.colors.cursor_off)
+
+ debug('build: executor starting')
+
+ back = os.getcwd()
+ os.chdir(self.bldnode.abspath())
+
+ try:
+ try:
+ dw(on=False)
+ self.generator.start()
+ except KeyboardInterrupt:
+ dw()
+ # if self.generator.processed != 1: TODO
+ self.save()
+ raise
+ except Exception:
+ dw()
+ # do not store anything, because something bad happened
+ raise
+ else:
+ dw()
+ #if self.generator.processed != 1: TODO
+ self.save()
+
+ if self.generator.error:
+ raise BuildError(self, self.task_manager.tasks_done)
+
+ finally:
+ os.chdir(back)
+
+ def install(self):
+ "this function is called for both install and uninstall"
+ debug('build: install called')
+
+ self.flush()
+
+ # remove empty folders after uninstalling
+ if self.is_install < 0:
+ lst = []
+ for x in self.uninstall:
+ dir = os.path.dirname(x)
+ if not dir in lst: lst.append(dir)
+ lst.sort()
+ lst.reverse()
+
+ nlst = []
+ for y in lst:
+ x = y
+ while len(x) > 4:
+ if not x in nlst: nlst.append(x)
+ x = os.path.dirname(x)
+
+ nlst.sort()
+ nlst.reverse()
+ for x in nlst:
+ try: os.rmdir(x)
+ except OSError: pass
+
+ def new_task_gen(self, *k, **kw):
+ if self.task_gen_cache_names:
+ self.task_gen_cache_names = {}
+
+ kw['bld'] = self
+ if len(k) == 0:
+ ret = TaskGen.task_gen(*k, **kw)
+ else:
+ cls_name = k[0]
+
+ try: cls = TaskGen.task_gen.classes[cls_name]
+ except KeyError: raise Utils.WscriptError('%s is not a valid task generator -> %s' %
+ (cls_name, [x for x in TaskGen.task_gen.classes]))
+ ret = cls(*k, **kw)
+ return ret
+
+ def __call__(self, *k, **kw):
+ if self.task_gen_cache_names:
+ self.task_gen_cache_names = {}
+
+ kw['bld'] = self
+ return TaskGen.task_gen(*k, **kw)
+
+ def load_envs(self):
+ try:
+ lst = Utils.listdir(self.cachedir)
+ except OSError, e:
+ if e.errno == errno.ENOENT:
+ raise Utils.WafError('The project was not configured: run "waf configure" first!')
+ else:
+ raise
+
+ if not lst:
+ raise Utils.WafError('The cache directory is empty: reconfigure the project')
+
+ for file in lst:
+ if file.endswith(CACHE_SUFFIX):
+ env = Environment.Environment(os.path.join(self.cachedir, file))
+ name = file[:-len(CACHE_SUFFIX)]
+
+ self.all_envs[name] = env
+
+ self.init_variants()
+
+ for env in self.all_envs.values():
+ for f in env[CFG_FILES]:
+ newnode = self.path.find_or_declare(f)
+ try:
+ hash = Utils.h_file(newnode.abspath(env))
+ except (IOError, AttributeError):
+ error("cannot find "+f)
+ hash = SIG_NIL
+ self.node_sigs[env.variant()][newnode.id] = hash
+
+ # TODO: hmmm, these nodes are removed from the tree when calling rescan()
+ self.bldnode = self.root.find_dir(self.bldnode.abspath())
+ self.path = self.srcnode = self.root.find_dir(self.srcnode.abspath())
+ self.cwd = self.bldnode.abspath()
+
+ def setup(self, tool, tooldir=None, funs=None):
+ "setup tools for build process"
+ if isinstance(tool, list):
+ for i in tool: self.setup(i, tooldir)
+ return
+
+ if not tooldir: tooldir = Options.tooldir
+
+ module = Utils.load_tool(tool, tooldir)
+ if hasattr(module, "setup"): module.setup(self)
+
+ def init_variants(self):
+ debug('build: init variants')
+
+ lstvariants = []
+ for env in self.all_envs.values():
+ if not env.variant() in lstvariants:
+ lstvariants.append(env.variant())
+ self.lst_variants = lstvariants
+
+ debug('build: list of variants is %r', lstvariants)
+
+ for name in lstvariants+[0]:
+ for v in 'node_sigs cache_node_abspath'.split():
+ var = getattr(self, v)
+ if not name in var:
+ var[name] = {}
+
+ # ======================================= #
+ # node and folder handling
+
+ # this should be the main entry point
+ def load_dirs(self, srcdir, blddir, load_cache=1):
+ "this functions should be the start of everything"
+
+ assert(os.path.isabs(srcdir))
+ assert(os.path.isabs(blddir))
+
+ self.cachedir = os.path.join(blddir, CACHE_DIR)
+
+ if srcdir == blddir:
+ raise Utils.WafError("build dir must be different from srcdir: %s <-> %s " % (srcdir, blddir))
+
+ self.bdir = blddir
+
+ # try to load the cache file, if it does not exist, nothing happens
+ self.load()
+
+ if not self.root:
+ Node.Nodu = self.node_class
+ self.root = Node.Nodu('', None, Node.DIR)
+
+ if not self.srcnode:
+ self.srcnode = self.root.ensure_dir_node_from_path(srcdir)
+ debug('build: srcnode is %s and srcdir %s', self.srcnode.name, srcdir)
+
+ self.path = self.srcnode
+
+ # create this build dir if necessary
+ try: os.makedirs(blddir)
+ except OSError: pass
+
+ if not self.bldnode:
+ self.bldnode = self.root.ensure_dir_node_from_path(blddir)
+
+ self.init_variants()
+
+ def rescan(self, src_dir_node):
+ """
+ list the contents of a folder node and update its list of children
+
+ The intent is to perform the following steps
+ * remove the nodes for the files that have disappeared
+ * remove the signatures for the build files that have disappeared
+ * cache the results of os.listdir
+ * create the build folder equivalent (mkdir) for each variant
+ src/bar -> build/default/src/bar, build/release/src/bar
+
+ when a folder in the source directory is removed, we do not check recursively
+ to remove the unused nodes. To do that, call 'waf clean' and build again.
+ """
+
+ # do not rescan over and over again
+ # TODO use a single variable in waf 1.6
+ if self.cache_scanned_folders.get(src_dir_node.id, None): return
+ self.cache_scanned_folders[src_dir_node.id] = True
+
+ # TODO remove in waf 1.6
+ if hasattr(self, 'repository'): self.repository(src_dir_node)
+
+ if not src_dir_node.name and sys.platform == 'win32':
+ # the root has no name, contains drive letters, and cannot be listed
+ return
+
+
+ # first, take the case of the source directory
+ parent_path = src_dir_node.abspath()
+ try:
+ lst = set(Utils.listdir(parent_path))
+ except OSError:
+ lst = set([])
+
+ # TODO move this at the bottom
+ self.cache_dir_contents[src_dir_node.id] = lst
+
+ # hash the existing source files, remove the others
+ cache = self.node_sigs[0]
+ for x in src_dir_node.childs.values():
+ if x.id & 3 != Node.FILE: continue
+ if x.name in lst:
+ try:
+ cache[x.id] = Utils.h_file(x.abspath())
+ except IOError:
+ raise Utils.WafError('The file %s is not readable or has become a dir' % x.abspath())
+ else:
+ try: del cache[x.id]
+ except KeyError: pass
+
+ del src_dir_node.childs[x.name]
+
+
+ # first obtain the differences between srcnode and src_dir_node
+ h1 = self.srcnode.height()
+ h2 = src_dir_node.height()
+
+ lst = []
+ child = src_dir_node
+ while h2 > h1:
+ lst.append(child.name)
+ child = child.parent
+ h2 -= 1
+ lst.reverse()
+
+ # list the files in the build dirs
+ try:
+ for variant in self.lst_variants:
+ sub_path = os.path.join(self.bldnode.abspath(), variant , *lst)
+ self.listdir_bld(src_dir_node, sub_path, variant)
+ except OSError:
+
+ # listdir failed, remove the build node signatures for all variants
+ for node in src_dir_node.childs.values():
+ if node.id & 3 != Node.BUILD:
+ continue
+
+ for dct in self.node_sigs.values():
+ if node.id in dct:
+ dct.__delitem__(node.id)
+
+ # the policy is to avoid removing nodes representing directories
+ src_dir_node.childs.__delitem__(node.name)
+
+ for variant in self.lst_variants:
+ sub_path = os.path.join(self.bldnode.abspath(), variant , *lst)
+ try:
+ os.makedirs(sub_path)
+ except OSError:
+ pass
+
+ # ======================================= #
+ def listdir_src(self, parent_node):
+ """do not use, kept for compatibility"""
+ pass
+
+ def remove_node(self, node):
+ """do not use, kept for compatibility"""
+ pass
+
+ def listdir_bld(self, parent_node, path, variant):
+ """in this method we do not add timestamps but we remove them
+ when the files no longer exist (file removed in the build dir)"""
+
+ i_existing_nodes = [x for x in parent_node.childs.values() if x.id & 3 == Node.BUILD]
+
+ lst = set(Utils.listdir(path))
+ node_names = set([x.name for x in i_existing_nodes])
+ remove_names = node_names - lst
+
+ # remove the stamps of the build nodes that no longer exist on the filesystem
+ ids_to_remove = [x.id for x in i_existing_nodes if x.name in remove_names]
+ cache = self.node_sigs[variant]
+ for nid in ids_to_remove:
+ if nid in cache:
+ cache.__delitem__(nid)
+
+ def get_env(self):
+ return self.env_of_name('default')
+ def set_env(self, name, val):
+ self.all_envs[name] = val
+
+ env = property(get_env, set_env)
+
+ def add_manual_dependency(self, path, value):
+ if isinstance(path, Node.Node):
+ node = path
+ elif os.path.isabs(path):
+ node = self.root.find_resource(path)
+ else:
+ node = self.path.find_resource(path)
+ self.deps_man[node.id].append(value)
+
+ def launch_node(self):
+ """return the launch directory as a node"""
+ # p_ln is kind of private, but kept public in case it is needed
+ try:
+ return self.p_ln
+ except AttributeError:
+ self.p_ln = self.root.find_dir(Options.launch_dir)
+ return self.p_ln
+
+ def glob(self, pattern, relative=True):
+ "files matching the pattern, seen from the current folder"
+ path = self.path.abspath()
+ files = [self.root.find_resource(x) for x in glob.glob(path+os.sep+pattern)]
+ if relative:
+ files = [x.path_to_parent(self.path) for x in files if x]
+ else:
+ files = [x.abspath() for x in files if x]
+ return files
+
+ ## the following methods are candidates for the stable apis ##
+
+ def add_group(self, *k):
+ self.task_manager.add_group(*k)
+
+ def set_group(self, *k, **kw):
+ self.task_manager.set_group(*k, **kw)
+
+ def hash_env_vars(self, env, vars_lst):
+ """hash environment variables
+ ['CXX', ..] -> [env['CXX'], ..] -> md5()"""
+
+ # ccroot objects use the same environment for building all their .o files
+ # at once, so the same environment and variables recur; cache the hash
+
+ idx = str(id(env)) + str(vars_lst)
+ try: return self.cache_sig_vars[idx]
+ except KeyError: pass
+
+ lst = [str(env[a]) for a in vars_lst]
+ ret = Utils.h_list(lst)
+ debug('envhash: %r %r', ret, lst)
+
+ # next time
+ self.cache_sig_vars[idx] = ret
+ return ret
+
+ def name_to_obj(self, name, env):
+ """retrieve a task generator from its name or its target name
+ remember that names must be unique"""
+ cache = self.task_gen_cache_names
+ if not cache:
+ # create the index lazily
+ for x in self.all_task_gen:
+ vt = x.env.variant() + '_'
+ if x.name:
+ cache[vt + x.name] = x
+ else:
+ if isinstance(x.target, str):
+ target = x.target
+ else:
+ target = ' '.join(x.target)
+ v = vt + target
+ if not cache.get(v, None):
+ cache[v] = x
+ return cache.get(env.variant() + '_' + name, None)
+
+ def flush(self, all=1):
+ """tell the task generators to create the tasks"""
+
+ self.ini = datetime.datetime.now()
+ # force the initialization of the mapping name->object in flush
+ # name_to_obj can be used in userland scripts; in that case beware of an incomplete mapping
+ self.task_gen_cache_names = {}
+ self.name_to_obj('', self.env)
+
+ debug('build: delayed operation TaskGen.flush() called')
+
+ if Options.options.compile_targets:
+ debug('task_gen: posting objects %r listed in compile_targets', Options.options.compile_targets)
+
+ mana = self.task_manager
+ to_post = []
+ min_grp = 0
+
+ # ensure the target names exist, fail before any post()
+ target_objects = Utils.DefaultDict(list)
+ for target_name in Options.options.compile_targets.split(','):
+ # trim target_name (handle cases when the user added spaces to targets)
+ target_name = target_name.strip()
+ for env in self.all_envs.values():
+ tg = self.name_to_obj(target_name, env)
+ if tg:
+ target_objects[target_name].append(tg)
+
+ m = mana.group_idx(tg)
+ if m > min_grp:
+ min_grp = m
+ to_post = [tg]
+ elif m == min_grp:
+ to_post.append(tg)
+
+ if not target_name in target_objects and all:
+ raise Utils.WafError("target '%s' does not exist" % target_name)
+
+ debug('group: Forcing up to group %s for target %s', mana.group_name(min_grp), Options.options.compile_targets)
+
+ # post all the task generators in previous groups
+ for i in xrange(len(mana.groups)):
+ mana.current_group = i
+ if i == min_grp:
+ break
+ g = mana.groups[i]
+ debug('group: Forcing group %s', mana.group_name(g))
+ for t in g.tasks_gen:
+ debug('group: Posting %s', t.name or t.target)
+ t.post()
+
+ # then post the task generators listed in compile_targets in the last group
+ for t in to_post:
+ t.post()
+
+ else:
+ debug('task_gen: posting objects (normal)')
+ ln = self.launch_node()
+ # if the build is started from the build directory, do as if it was started from the top-level
+ # for the pretty-printing (Node.py), the two lines below cannot be moved to Build::launch_node
+ if ln.is_child_of(self.bldnode) or not ln.is_child_of(self.srcnode):
+ ln = self.srcnode
+
+ # if the project file is located under the source directory, build all targets by default
+ # else 'waf configure build' does nothing
+ proj_node = self.root.find_dir(os.path.split(Utils.g_module.root_path)[0])
+ if proj_node.id != self.srcnode.id:
+ ln = self.srcnode
+
+ for i in xrange(len(self.task_manager.groups)):
+ g = self.task_manager.groups[i]
+ self.task_manager.current_group = i
+ if Logs.verbose:
+ groups = [x for x in self.task_manager.groups_names if id(self.task_manager.groups_names[x]) == id(g)]
+ name = groups and groups[0] or 'unnamed'
+ Logs.debug('group: group %s', name)
+ for tg in g.tasks_gen:
+ if not tg.path.is_child_of(ln):
+ continue
+ if Logs.verbose:
+ Logs.debug('group: %s' % tg)
+ tg.post()
+
+ def env_of_name(self, name):
+ try:
+ return self.all_envs[name]
+ except KeyError:
+ error('no such environment: '+name)
+ return None
+
+ def progress_line(self, state, total, col1, col2):
+ n = len(str(total))
+
+ Utils.rot_idx += 1
+ ind = Utils.rot_chr[Utils.rot_idx % 4]
+
+ ini = self.ini
+
+ pc = (100.*state)/total
+ eta = Utils.get_elapsed_time(ini)
+ fs = "[%%%dd/%%%dd][%%s%%2d%%%%%%s][%s][" % (n, n, ind)
+ left = fs % (state, total, col1, pc, col2)
+ right = '][%s%s%s]' % (col1, eta, col2)
+
+ cols = Utils.get_term_cols() - len(left) - len(right) + 2*len(col1) + 2*len(col2)
+ if cols < 7: cols = 7
+
+ ratio = int((cols*state)/total) - 1
+
+ bar = ('='*ratio+'>').ljust(cols)
+ msg = Utils.indicator % (left, bar, right)
+
+ return msg
+
+
+ # do_install is used by install_files/install_as below
+ def do_install(self, src, tgt, chmod=O644):
+ """returns true if the file was effectively installed or uninstalled, false otherwise"""
+ if self.is_install > 0:
+ if not Options.options.force:
+ # check if the file is already there to avoid a copy
+ try:
+ st1 = os.stat(tgt)
+ st2 = os.stat(src)
+ except OSError:
+ pass
+ else:
+ # same size and identical timestamps -> make no copy
+ if st1.st_mtime >= st2.st_mtime and st1.st_size == st2.st_size:
+ return False
+
+ srclbl = src.replace(self.srcnode.abspath(None)+os.sep, '')
+ info("* installing %s as %s" % (srclbl, tgt))
+
+ # following is for shared libs and stale inodes (-_-)
+ try: os.remove(tgt)
+ except OSError: pass
+
+ try:
+ shutil.copy2(src, tgt)
+ os.chmod(tgt, chmod)
+ except IOError:
+ try:
+ os.stat(src)
+ except (OSError, IOError):
+ error('File %r does not exist' % src)
+ raise Utils.WafError('Could not install the file %r' % tgt)
+ return True
+
+ elif self.is_install < 0:
+ info("* uninstalling %s" % tgt)
+
+ self.uninstall.append(tgt)
+
+ try:
+ os.remove(tgt)
+ except OSError, e:
+ if e.errno != errno.ENOENT:
+ if not getattr(self, 'uninstall_error', None):
+ self.uninstall_error = True
+ Logs.warn('build: some files could not be uninstalled (retry with -vv to list them)')
+ if Logs.verbose > 1:
+ Logs.warn('could not remove %s (error code %r)' % (e.filename, e.errno))
+ return True
+
+ red = re.compile(r"^([A-Za-z]:)?[/\\\\]*")
+ def get_install_path(self, path, env=None):
+ "installation path prefixed by the destdir, the variables like in '${PREFIX}/bin' are substituted"
+ if not env: env = self.env
+ destdir = env.get_destdir()
+ path = path.replace('/', os.sep)
+ destpath = Utils.subst_vars(path, env)
+ if destdir:
+ destpath = os.path.join(destdir, self.red.sub('', destpath))
+ return destpath
+
+ def install_dir(self, path, env=None):
+ """
+ create empty folders for the installation (very rarely used)
+ """
+ if env:
+ assert isinstance(env, Environment.Environment), "invalid parameter"
+ else:
+ env = self.env
+
+ if not path:
+ return []
+
+ destpath = self.get_install_path(path, env)
+
+ if self.is_install > 0:
+ info('* creating %s' % destpath)
+ Utils.check_dir(destpath)
+ elif self.is_install < 0:
+ info('* removing %s' % destpath)
+ self.uninstall.append(destpath + '/xxx') # yes, ugly
+
+ def install_files(self, path, files, env=None, chmod=O644, relative_trick=False, cwd=None):
+ """To install files only after they have been built, put the calls in a method named
+ post_build on the top-level wscript
+
+ The files must be a list and contain paths as strings or as Nodes
+
+ The relative_trick flag can be set to install folders, use bld.path.ant_glob() with it
+ """
+ if env:
+ assert isinstance(env, Environment.Environment), "invalid parameter"
+ else:
+ env = self.env
+
+ if not path: return []
+
+ if not cwd:
+ cwd = self.path
+
+ if isinstance(files, str) and '*' in files:
+ gl = cwd.abspath() + os.sep + files
+ lst = glob.glob(gl)
+ else:
+ lst = Utils.to_list(files)
+
+ if not getattr(lst, '__iter__', False):
+ lst = [lst]
+
+ destpath = self.get_install_path(path, env)
+
+ Utils.check_dir(destpath)
+
+ installed_files = []
+ for filename in lst:
+ if isinstance(filename, str) and os.path.isabs(filename):
+ alst = Utils.split_path(filename)
+ destfile = os.path.join(destpath, alst[-1])
+ else:
+ if isinstance(filename, Node.Node):
+ nd = filename
+ else:
+ nd = cwd.find_resource(filename)
+ if not nd:
+ raise Utils.WafError("Unable to install the file %r (not found in %s)" % (filename, cwd))
+
+ if relative_trick:
+ destfile = os.path.join(destpath, filename)
+ Utils.check_dir(os.path.dirname(destfile))
+ else:
+ destfile = os.path.join(destpath, nd.name)
+
+ filename = nd.abspath(env)
+
+ if self.do_install(filename, destfile, chmod):
+ installed_files.append(destfile)
+ return installed_files
+
+ def install_as(self, path, srcfile, env=None, chmod=O644, cwd=None):
+ """
+ srcfile may be a string or a Node representing the file to install
+
+ returns True if the file was effectively installed, False otherwise
+ """
+ if env:
+ assert isinstance(env, Environment.Environment), "invalid parameter"
+ else:
+ env = self.env
+
+ if not path:
+ raise Utils.WafError("where do you want to install %r? (%r?)" % (srcfile, path))
+
+ if not cwd:
+ cwd = self.path
+
+ destpath = self.get_install_path(path, env)
+
+ dir, name = os.path.split(destpath)
+ Utils.check_dir(dir)
+
+ # the source path
+ if isinstance(srcfile, Node.Node):
+ src = srcfile.abspath(env)
+ else:
+ src = srcfile
+ if not os.path.isabs(srcfile):
+ node = cwd.find_resource(srcfile)
+ if not node:
+ raise Utils.WafError("Unable to install the file %r (not found in %s)" % (srcfile, cwd))
+ src = node.abspath(env)
+
+ return self.do_install(src, destpath, chmod)
+
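+	# Hedged sketch (file names invented): install_as renames at install
+	# time; O755 comes from Constants:
+	#
+	#   def build(bld):
+	#       bld.install_as('${PREFIX}/bin/mytool', 'tool.py', chmod=O755)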
+ def symlink_as(self, path, src, env=None, cwd=None):
+ """example: bld.symlink_as('${PREFIX}/lib/libfoo.so', 'libfoo.so.1.2.3') """
+
+ if sys.platform == 'win32':
+			# well, this *cannot* work: os.symlink does not exist on win32
+ return
+
+ if not path:
+ raise Utils.WafError("where do you want to install %r? (%r?)" % (src, path))
+
+ tgt = self.get_install_path(path, env)
+
+ dir, name = os.path.split(tgt)
+ Utils.check_dir(dir)
+
+ if self.is_install > 0:
+ link = False
+ if not os.path.islink(tgt):
+ link = True
+ elif os.readlink(tgt) != src:
+ link = True
+
+ if link:
+ try: os.remove(tgt)
+ except OSError: pass
+
+ info('* symlink %s (-> %s)' % (tgt, src))
+ os.symlink(src, tgt)
+ return 0
+
+ else: # UNINSTALL
+ try:
+ info('* removing %s' % (tgt))
+ os.remove(tgt)
+ return 0
+ except OSError:
+ return 1
+
+ def exec_command(self, cmd, **kw):
+ # 'runner' zone is printed out for waf -v, see wafadmin/Options.py
+ debug('runner: system command -> %s', cmd)
+ if self.log:
+ self.log.write('%s\n' % cmd)
+ kw['log'] = self.log
+ try:
+ if not kw.get('cwd', None):
+ kw['cwd'] = self.cwd
+ except AttributeError:
+ self.cwd = kw['cwd'] = self.bldnode.abspath()
+ return Utils.exec_command(cmd, **kw)
+
+ def printout(self, s):
+ f = self.log or sys.stderr
+ f.write(s)
+ f.flush()
+
+ def add_subdirs(self, dirs):
+ self.recurse(dirs, 'build')
+
+ def pre_recurse(self, name_or_mod, path, nexdir):
+ if not hasattr(self, 'oldpath'):
+ self.oldpath = []
+ self.oldpath.append(self.path)
+ self.path = self.root.find_dir(nexdir)
+ return {'bld': self, 'ctx': self}
+
+ def post_recurse(self, name_or_mod, path, nexdir):
+ self.path = self.oldpath.pop()
+
+ ###### user-defined behaviour
+
+ def pre_build(self):
+ if hasattr(self, 'pre_funs'):
+ for m in self.pre_funs:
+ m(self)
+
+ def post_build(self):
+ if hasattr(self, 'post_funs'):
+ for m in self.post_funs:
+ m(self)
+
+ def add_pre_fun(self, meth):
+ try: self.pre_funs.append(meth)
+ except AttributeError: self.pre_funs = [meth]
+
+ def add_post_fun(self, meth):
+ try: self.post_funs.append(meth)
+ except AttributeError: self.post_funs = [meth]
+
+ def use_the_magic(self):
+ Task.algotype = Task.MAXPARALLEL
+ Task.file_deps = Task.extract_deps
+ self.magic = True
+
+ install_as = group_method(install_as)
+ install_files = group_method(install_files)
+ symlink_as = group_method(symlink_as)
diff --git a/third_party/waf/wafadmin/Configure.py b/third_party/waf/wafadmin/Configure.py
new file mode 100644
index 0000000..7575cef
--- /dev/null
+++ b/third_party/waf/wafadmin/Configure.py
@@ -0,0 +1,442 @@
+#!/usr/bin/env python
+# encoding: utf-8
+# Thomas Nagy, 2005-2008 (ita)
+
+"""
+Configuration system
+
+A configuration instance is created when "waf configure" is called; it is used to:
+* create data dictionaries (Environment instances)
+* store the list of modules to import
+
+The old model (copied from Scons) was to store logic (mapping file extensions to functions)
+along with the data. Waf separates that logic by adding an indirection layer
+(storing the names in the Environment instances)
+
+In the new model, the logic is more object-oriented and is provided by the user
+scripts. The data files (Environments) must contain configuration data only (flags, ..)
+
+Note: the c/c++ related code is in the module config_c
+"""
+
+import os, shlex, sys, time
+try: import cPickle
+except ImportError: import pickle as cPickle
+import Environment, Utils, Options, Logs
+from Logs import warn
+from Constants import *
+
+try:
+ from urllib import request
+except ImportError:
+ from urllib import urlopen
+else:
+ urlopen = request.urlopen
+
+conf_template = '''# project %(app)s configured on %(now)s by
+# waf %(wafver)s (abi %(abi)s, python %(pyver)x on %(systype)s)
+# using %(args)s
+#
+'''
+
+class ConfigurationError(Utils.WscriptError):
+ pass
+
+autoconfig = False
+"reconfigure the project automatically"
+
+def find_file(filename, path_list):
+ """find a file in a list of paths
+ @param filename: name of the file to search for
+ @param path_list: list of directories to search
+	@return: the first directory containing filename, or '' if filename could not be found
+"""
+ for directory in Utils.to_list(path_list):
+ if os.path.exists(os.path.join(directory, filename)):
+ return directory
+ return ''
+
+def find_program_impl(env, filename, path_list=[], var=None, environ=None):
+ """find a program in folders path_lst, and sets env[var]
+ @param env: environment
+ @param filename: name of the program to search for
+ @param path_list: list of directories to search for filename
+ @param var: environment value to be checked for in env or os.environ
+ @return: either the value that is referenced with [var] in env or os.environ
+ or the first occurrence filename or '' if filename could not be found
+"""
+
+ if not environ:
+ environ = os.environ
+
+ try: path_list = path_list.split()
+ except AttributeError: pass
+
+ if var:
+ if env[var]: return env[var]
+ if var in environ: env[var] = environ[var]
+
+ if not path_list: path_list = environ.get('PATH', '').split(os.pathsep)
+
+ ext = (Options.platform == 'win32') and '.exe,.com,.bat,.cmd' or ''
+ for y in [filename+x for x in ext.split(',')]:
+ for directory in path_list:
+ x = os.path.join(directory, y)
+ if os.path.isfile(x):
+ if var: env[var] = x
+ return x
+ return ''
+
+class ConfigurationContext(Utils.Context):
+ tests = {}
+ error_handlers = []
+ def __init__(self, env=None, blddir='', srcdir=''):
+ self.env = None
+ self.envname = ''
+
+ self.environ = dict(os.environ)
+
+ self.line_just = 40
+
+ self.blddir = blddir
+ self.srcdir = srcdir
+ self.all_envs = {}
+
+ # curdir: necessary for recursion
+ self.cwd = self.curdir = os.getcwd()
+
+ self.tools = [] # tools loaded in the configuration, and that will be loaded when building
+
+ self.setenv(DEFAULT)
+
+ self.lastprog = ''
+
+ self.hash = 0
+ self.files = []
+
+ self.tool_cache = []
+
+ if self.blddir:
+ self.post_init()
+
+ def post_init(self):
+
+ self.cachedir = os.path.join(self.blddir, CACHE_DIR)
+
+ path = os.path.join(self.blddir, WAF_CONFIG_LOG)
+ try: os.unlink(path)
+ except (OSError, IOError): pass
+
+ try:
+ self.log = open(path, 'w')
+ except (OSError, IOError):
+ self.fatal('could not open %r for writing' % path)
+
+ app = Utils.g_module.APPNAME
+ if app:
+ ver = getattr(Utils.g_module, 'VERSION', '')
+ if ver:
+ app = "%s (%s)" % (app, ver)
+
+ now = time.ctime()
+ pyver = sys.hexversion
+ systype = sys.platform
+ args = " ".join(sys.argv)
+ wafver = WAFVERSION
+ abi = ABI
+ self.log.write(conf_template % vars())
+
+ def __del__(self):
+ """cleanup function: close config.log"""
+
+		# may be run by the gc, not always after initialization
+ if hasattr(self, 'log') and self.log:
+ self.log.close()
+
+ def fatal(self, msg):
+ raise ConfigurationError(msg)
+
+ def check_tool(self, input, tooldir=None, funs=None):
+ "load a waf tool"
+
+ tools = Utils.to_list(input)
+ if tooldir: tooldir = Utils.to_list(tooldir)
+ for tool in tools:
+ tool = tool.replace('++', 'xx')
+ if tool == 'java': tool = 'javaw'
+ if tool.lower() == 'unittest': tool = 'unittestw'
+ # avoid loading the same tool more than once with the same functions
+ # used by composite projects
+
+ mag = (tool, id(self.env), funs)
+ if mag in self.tool_cache:
+ continue
+ self.tool_cache.append(mag)
+
+ module = None
+ try:
+ module = Utils.load_tool(tool, tooldir)
+ except Exception, e:
+ ex = e
+ if Options.options.download:
+ _3rdparty = os.path.normpath(Options.tooldir[0] + os.sep + '..' + os.sep + '3rdparty')
+
+					# try to download the tool from the remote repository
+					# (the --download option defaults to false)
+ for x in Utils.to_list(Options.remote_repo):
+ for sub in ['branches/waf-%s/wafadmin/3rdparty' % WAFVERSION, 'trunk/wafadmin/3rdparty']:
+ url = '/'.join((x, sub, tool + '.py'))
+ try:
+ web = urlopen(url)
+ if web.getcode() != 200:
+ continue
+ except Exception, e:
+ # on python3 urlopen throws an exception
+ continue
+ else:
+ loc = None
+ try:
+ loc = open(_3rdparty + os.sep + tool + '.py', 'wb')
+ loc.write(web.read())
+ web.close()
+ finally:
+ if loc:
+ loc.close()
+ Logs.warn('downloaded %s from %s' % (tool, url))
+ try:
+ module = Utils.load_tool(tool, tooldir)
+ except:
+ Logs.warn('module %s from %s is unusable' % (tool, url))
+ try:
+ os.unlink(_3rdparty + os.sep + tool + '.py')
+ except:
+ pass
+ continue
+ else:
+ break
+
+ if not module:
+ Logs.error('Could not load the tool %r or download a suitable replacement from the repository (sys.path %r)\n%s' % (tool, sys.path, e))
+ raise ex
+ else:
+ Logs.error('Could not load the tool %r in %r (try the --download option?):\n%s' % (tool, sys.path, e))
+ raise ex
+
+ if funs is not None:
+ self.eval_rules(funs)
+ else:
+ func = getattr(module, 'detect', None)
+ if func:
+ if type(func) is type(find_file): func(self)
+ else: self.eval_rules(func)
+
+ self.tools.append({'tool':tool, 'tooldir':tooldir, 'funs':funs})
+
+ def sub_config(self, k):
+ "executes the configure function of a wscript module"
+ self.recurse(k, name='configure')
+
+ def pre_recurse(self, name_or_mod, path, nexdir):
+ return {'conf': self, 'ctx': self}
+
+ def post_recurse(self, name_or_mod, path, nexdir):
+ if not autoconfig:
+ return
+ self.hash = hash((self.hash, getattr(name_or_mod, 'waf_hash_val', name_or_mod)))
+ self.files.append(path)
+
+ def store(self, file=''):
+ "save the config results into the cache file"
+ if not os.path.isdir(self.cachedir):
+ os.makedirs(self.cachedir)
+
+ if not file:
+ file = open(os.path.join(self.cachedir, 'build.config.py'), 'w')
+ file.write('version = 0x%x\n' % HEXVERSION)
+ file.write('tools = %r\n' % self.tools)
+ file.close()
+
+ if not self.all_envs:
+ self.fatal('nothing to store in the configuration context!')
+ for key in self.all_envs:
+ tmpenv = self.all_envs[key]
+ tmpenv.store(os.path.join(self.cachedir, key + CACHE_SUFFIX))
+
+ def set_env_name(self, name, env):
+ "add a new environment called name"
+ self.all_envs[name] = env
+ return env
+
+ def retrieve(self, name, fromenv=None):
+ "retrieve an environment called name"
+ try:
+ env = self.all_envs[name]
+ except KeyError:
+ env = Environment.Environment()
+ env['PREFIX'] = os.path.abspath(os.path.expanduser(Options.options.prefix))
+ self.all_envs[name] = env
+ else:
+ if fromenv: warn("The environment %s may have been configured already" % name)
+ return env
+
+ def setenv(self, name):
+ "enable the environment called name"
+ self.env = self.retrieve(name)
+ self.envname = name
+
+ def add_os_flags(self, var, dest=None):
+		# do not use 'get': a missing variable must raise KeyError for the except below
+ try: self.env.append_value(dest or var, Utils.to_list(self.environ[var]))
+ except KeyError: pass
+
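+	# e.g. conf.add_os_flags('CFLAGS') appends the value of $CFLAGS from the
+	# process environment to env['CFLAGS']; it is a no-op when the variable
+	# is unset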
+ def check_message_1(self, sr):
+ self.line_just = max(self.line_just, len(sr))
+ for x in ('\n', self.line_just * '-', '\n', sr, '\n'):
+ self.log.write(x)
+ Utils.pprint('NORMAL', "%s :" % sr.ljust(self.line_just), sep='')
+
+ def check_message_2(self, sr, color='GREEN'):
+ self.log.write(sr)
+ self.log.write('\n')
+ Utils.pprint(color, sr)
+
+ def check_message(self, th, msg, state, option=''):
+ sr = 'Checking for %s %s' % (th, msg)
+ self.check_message_1(sr)
+ p = self.check_message_2
+ if state: p('ok ' + str(option))
+ else: p('not found', 'YELLOW')
+
+ # FIXME remove in waf 1.6
+ # the parameter 'option' is not used (kept for compatibility)
+ def check_message_custom(self, th, msg, custom, option='', color='PINK'):
+ sr = 'Checking for %s %s' % (th, msg)
+ self.check_message_1(sr)
+ self.check_message_2(custom, color)
+
+ def msg(self, msg, result, color=None):
+ """Prints a configuration message 'Checking for xxx: ok'"""
+ self.start_msg('Checking for ' + msg)
+
+ if not isinstance(color, str):
+ color = result and 'GREEN' or 'YELLOW'
+
+ self.end_msg(result, color)
+
+ def start_msg(self, msg):
+ try:
+ if self.in_msg:
+ return
+ except:
+ self.in_msg = 0
+ self.in_msg += 1
+
+ self.line_just = max(self.line_just, len(msg))
+ for x in ('\n', self.line_just * '-', '\n', msg, '\n'):
+ self.log.write(x)
+ Utils.pprint('NORMAL', "%s :" % msg.ljust(self.line_just), sep='')
+
+ def end_msg(self, result, color):
+ self.in_msg -= 1
+ if self.in_msg:
+ return
+
+ if not color:
+ color = 'GREEN'
+ if result == True:
+ msg = 'ok'
+ elif result == False:
+ msg = 'not found'
+ color = 'YELLOW'
+ else:
+ msg = str(result)
+
+ self.log.write(msg)
+ self.log.write('\n')
+ Utils.pprint(color, msg)
+
+ def find_program(self, filename, path_list=[], var=None, mandatory=False):
+ "wrapper that adds a configuration message"
+
+ ret = None
+ if var:
+ if self.env[var]:
+ ret = self.env[var]
+ elif var in os.environ:
+ ret = os.environ[var]
+
+ if not isinstance(filename, list): filename = [filename]
+ if not ret:
+ for x in filename:
+ ret = find_program_impl(self.env, x, path_list, var, environ=self.environ)
+ if ret: break
+
+ self.check_message_1('Checking for program %s' % ' or '.join(filename))
+ self.log.write(' find program=%r paths=%r var=%r\n -> %r\n' % (filename, path_list, var, ret))
+ if ret:
+ Utils.pprint('GREEN', str(ret))
+ else:
+ Utils.pprint('YELLOW', 'not found')
+ if mandatory:
+ self.fatal('The program %r is required' % filename)
+
+ if var:
+ self.env[var] = ret
+ return ret
+
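+	# Usage sketch for a configure function (the program names are only
+	# examples):
+	#
+	#   def configure(conf):
+	#       conf.find_program('gcc', var='CC', mandatory=True)
+	#       conf.find_program(['gawk', 'awk'], var='AWK')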
+ def cmd_to_list(self, cmd):
+ "commands may be written in pseudo shell like 'ccache g++'"
+ if isinstance(cmd, str) and cmd.find(' '):
+ try:
+ os.stat(cmd)
+ except OSError:
+ return shlex.split(cmd)
+ else:
+ return [cmd]
+ return cmd
+
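+	# Behaviour examples (inputs hypothetical); note that cmd.find(' ') is
+	# also truthy for -1 (no space found), so plain words take the
+	# stat/shlex path as well:
+	#
+	#   cmd_to_list('ccache g++')  -> ['ccache', 'g++']
+	#   cmd_to_list('gcc')         -> ['gcc']  (os.stat fails, shlex splits)
+	#   cmd_to_list(['gcc', '-c']) -> returned unchanged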
+ def __getattr__(self, name):
+ r = self.__class__.__dict__.get(name, None)
+ if r: return r
+ if name and name.startswith('require_'):
+
+ for k in ['check_', 'find_']:
+ n = name.replace('require_', k)
+ ret = self.__class__.__dict__.get(n, None)
+ if ret:
+ def run(*k, **kw):
+ r = ret(self, *k, **kw)
+ if not r:
+ self.fatal('requirement failure')
+ return r
+ return run
+ self.fatal('No such method %r' % name)
+
+ def eval_rules(self, rules):
+ self.rules = Utils.to_list(rules)
+ for x in self.rules:
+ f = getattr(self, x)
+ if not f: self.fatal("No such method '%s'." % x)
+ try:
+ f()
+ except Exception, e:
+ ret = self.err_handler(x, e)
+ if ret == BREAK:
+ break
+ elif ret == CONTINUE:
+ continue
+ else:
+ self.fatal(e)
+
+ def err_handler(self, fun, error):
+ pass
+
+def conf(f):
+ "decorator: attach new configuration functions"
+ setattr(ConfigurationContext, f.__name__, f)
+ return f
+
+def conftest(f):
+ "decorator: attach new configuration tests (registered as strings)"
+ ConfigurationContext.tests[f.__name__] = f
+ return conf(f)
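+# Hedged sketch, not upstream code: how a wscript or waf tool would extend
+# the configuration context with the decorators above (the check name and
+# messages are invented):
+#
+#   from Configure import conf
+#
+#   @conf
+#   def check_answer(self):
+#       self.check_message_1('Checking for the answer')
+#       self.check_message_2('42')
+#
+#   # later, in a configure function: conf.check_answer()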
diff --git a/third_party/waf/wafadmin/Constants.py b/third_party/waf/wafadmin/Constants.py
new file mode 100644
index 0000000..30960b9
--- /dev/null
+++ b/third_party/waf/wafadmin/Constants.py
@@ -0,0 +1,75 @@
+#!/usr/bin/env python
+# encoding: utf-8
+# Yinon dot me gmail 2008
+
+"""
+these constants are somewhat public, try not to mess with them
+
+maintainer: the version number is updated from the top-level wscript file
+"""
+
+# do not touch these three lines, they are updated automatically
+HEXVERSION=0x105019
+WAFVERSION="1.5.19"
+WAFREVISION = "9709M"
+ABI = 7
+
+# permissions (decimal values of the octal modes 0644 and 0755)
+O644 = 420
+O755 = 493
+
+MAXJOBS = 99999999
+
+CACHE_DIR = 'c4che'
+CACHE_SUFFIX = '.cache.py'
+DBFILE = '.wafpickle-%d' % ABI
+WSCRIPT_FILE = 'wscript'
+WSCRIPT_BUILD_FILE = 'wscript_build'
+WAF_CONFIG_LOG = 'config.log'
+WAF_CONFIG_H = 'config.h'
+
+SIG_NIL = 'iluvcuteoverload'
+
+VARIANT = '_VARIANT_'
+DEFAULT = 'default'
+
+SRCDIR = 'srcdir'
+BLDDIR = 'blddir'
+APPNAME = 'APPNAME'
+VERSION = 'VERSION'
+
+DEFINES = 'defines'
+UNDEFINED = ()
+
+BREAK = "break"
+CONTINUE = "continue"
+
+# task scheduler options
+JOBCONTROL = "JOBCONTROL"
+MAXPARALLEL = "MAXPARALLEL"
+NORMAL = "NORMAL"
+
+# task state
+NOT_RUN = 0
+MISSING = 1
+CRASHED = 2
+EXCEPTION = 3
+SKIPPED = 8
+SUCCESS = 9
+
+ASK_LATER = -1
+SKIP_ME = -2
+RUN_ME = -3
+
+
+LOG_FORMAT = "%(asctime)s %(c1)s%(zone)s%(c2)s %(message)s"
+HOUR_FORMAT = "%H:%M:%S"
+
+TEST_OK = True
+
+CFG_FILES = 'cfg_files'
+
+# positive '->' install
+# negative '<-' uninstall
+INSTALL = 1337
+UNINSTALL = -1337
diff --git a/third_party/waf/wafadmin/Environment.py b/third_party/waf/wafadmin/Environment.py
new file mode 100644
index 0000000..bea4146
--- /dev/null
+++ b/third_party/waf/wafadmin/Environment.py
@@ -0,0 +1,209 @@
+#!/usr/bin/env python
+# encoding: utf-8
+# Thomas Nagy, 2005 (ita)
+
+"""Environment representation
+
+There is one gotcha: __getitem__ returns [] when a key is undefined
+This means that for an unset key, print env['foo'] will print [] instead of raising
+"""
+
+import os, copy, re
+import Logs, Options, Utils
+from Constants import *
+re_imp = re.compile('^(#)*?([^#=]*?)\ =\ (.*?)$', re.M)
+
+class Environment(object):
+	"""A safe-to-use dictionary, but do not attach functions to it please (they break cPickle)
+ An environment instance can be stored into a file and loaded easily
+ """
+ __slots__ = ("table", "parent")
+ def __init__(self, filename=None):
+ self.table = {}
+ #self.parent = None
+
+ if filename:
+ self.load(filename)
+
+ def __contains__(self, key):
+ if key in self.table: return True
+ try: return self.parent.__contains__(key)
+ except AttributeError: return False # parent may not exist
+
+ def __str__(self):
+ keys = set()
+ cur = self
+ while cur:
+ keys.update(cur.table.keys())
+ cur = getattr(cur, 'parent', None)
+ keys = list(keys)
+ keys.sort()
+ return "\n".join(["%r %r" % (x, self.__getitem__(x)) for x in keys])
+
+ def __getitem__(self, key):
+ try:
+ while 1:
+ x = self.table.get(key, None)
+ if not x is None:
+ return x
+ self = self.parent
+ except AttributeError:
+ return []
+
+ def __setitem__(self, key, value):
+ self.table[key] = value
+
+ def __delitem__(self, key):
+ del self.table[key]
+
+ def pop(self, key, *args):
+ if len(args):
+ return self.table.pop(key, *args)
+ return self.table.pop(key)
+
+ def set_variant(self, name):
+ self.table[VARIANT] = name
+
+ def variant(self):
+ try:
+ while 1:
+ x = self.table.get(VARIANT, None)
+ if not x is None:
+ return x
+ self = self.parent
+ except AttributeError:
+ return DEFAULT
+
+ def copy(self):
+ # TODO waf 1.6 rename this method derive, #368
+ newenv = Environment()
+ newenv.parent = self
+ return newenv
+
+ def detach(self):
+ """TODO try it
+ modifying the original env will not change the copy"""
+ tbl = self.get_merged_dict()
+ try:
+ delattr(self, 'parent')
+ except AttributeError:
+ pass
+ else:
+ keys = tbl.keys()
+ for x in keys:
+ tbl[x] = copy.deepcopy(tbl[x])
+ self.table = tbl
+
+ def get_flat(self, key):
+ s = self[key]
+ if isinstance(s, str): return s
+ return ' '.join(s)
+
+ def _get_list_value_for_modification(self, key):
+ """Gets a value that must be a list for further modification. The
+ list may be modified inplace and there is no need to
+ "self.table[var] = value" afterwards.
+ """
+ try:
+ value = self.table[key]
+ except KeyError:
+ try: value = self.parent[key]
+ except AttributeError: value = []
+ if isinstance(value, list):
+ value = value[:]
+ else:
+ value = [value]
+ else:
+ if not isinstance(value, list):
+ value = [value]
+ self.table[key] = value
+ return value
+
+ def append_value(self, var, value):
+ current_value = self._get_list_value_for_modification(var)
+
+ if isinstance(value, list):
+ current_value.extend(value)
+ else:
+ current_value.append(value)
+
+ def prepend_value(self, var, value):
+ current_value = self._get_list_value_for_modification(var)
+
+ if isinstance(value, list):
+ current_value = value + current_value
+ # a new list: update the dictionary entry
+ self.table[var] = current_value
+ else:
+ current_value.insert(0, value)
+
+ # prepend unique would be ambiguous
+ def append_unique(self, var, value):
+ current_value = self._get_list_value_for_modification(var)
+
+ if isinstance(value, list):
+ for value_item in value:
+ if value_item not in current_value:
+ current_value.append(value_item)
+ else:
+ if value not in current_value:
+ current_value.append(value)
+
+ def get_merged_dict(self):
+ """compute a merged table"""
+ table_list = []
+ env = self
+ while 1:
+ table_list.insert(0, env.table)
+ try: env = env.parent
+ except AttributeError: break
+ merged_table = {}
+ for table in table_list:
+ merged_table.update(table)
+ return merged_table
+
+ def store(self, filename):
+ "Write the variables into a file"
+ file = open(filename, 'w')
+ merged_table = self.get_merged_dict()
+ keys = list(merged_table.keys())
+ keys.sort()
+ for k in keys: file.write('%s = %r\n' % (k, merged_table[k]))
+ file.close()
+
+ def load(self, filename):
+ "Retrieve the variables from a file"
+ tbl = self.table
+ code = Utils.readf(filename)
+ for m in re_imp.finditer(code):
+ g = m.group
+ tbl[g(2)] = eval(g(3))
+ Logs.debug('env: %s', self.table)
+
+ def get_destdir(self):
+ "return the destdir, useful for installing"
+ if self.__getitem__('NOINSTALL'): return ''
+ return Options.options.destdir
+
+ def update(self, d):
+ for k, v in d.iteritems():
+ self[k] = v
+
+
+ def __getattr__(self, name):
+ if name in self.__slots__:
+ return object.__getattr__(self, name)
+ else:
+ return self[name]
+
+ def __setattr__(self, name, value):
+ if name in self.__slots__:
+ object.__setattr__(self, name, value)
+ else:
+ self[name] = value
+
+ def __delattr__(self, name):
+ if name in self.__slots__:
+ object.__delattr__(self, name)
+ else:
+ del self[name]
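+# Illustrative sketch, not upstream code: typical Environment usage (the
+# variable names are arbitrary):
+#
+#   env = Environment()
+#   env['CC'] = 'gcc'
+#   env.append_value('CCFLAGS', ['-O2', '-g'])
+#   child = env.copy()                     # child delegates lookups to env
+#   child.append_unique('CCFLAGS', '-O2')  # already present, nothing added
+#   assert child['CC'] == 'gcc'
+#   env.store('/tmp/example.cache.py')     # Environment('/tmp/example.cache.py') reloads it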
diff --git a/third_party/waf/wafadmin/Logs.py b/third_party/waf/wafadmin/Logs.py
new file mode 100644
index 0000000..f67e87c
--- /dev/null
+++ b/third_party/waf/wafadmin/Logs.py
@@ -0,0 +1,133 @@
+#!/usr/bin/env python
+# encoding: utf-8
+# Thomas Nagy, 2005 (ita)
+
+import ansiterm
+import os, re, logging, traceback, sys
+from Constants import *
+
+zones = ''
+verbose = 0
+
+colors_lst = {
+'USE' : True,
+'BOLD' :'\x1b[01;1m',
+'RED' :'\x1b[01;31m',
+'GREEN' :'\x1b[32m',
+'YELLOW':'\x1b[33m',
+'PINK' :'\x1b[35m',
+'BLUE' :'\x1b[01;34m',
+'CYAN' :'\x1b[36m',
+'NORMAL':'\x1b[0m',
+'cursor_on' :'\x1b[?25h',
+'cursor_off' :'\x1b[?25l',
+}
+
+got_tty = False
+term = os.environ.get('TERM', 'dumb')
+if term not in ['dumb', 'emacs']:
+ try:
+ got_tty = sys.stderr.isatty() or (sys.platform == 'win32' and term in ['xterm', 'msys'])
+ except AttributeError:
+ pass
+
+import Utils
+
+if not got_tty or 'NOCOLOR' in os.environ:
+ colors_lst['USE'] = False
+
+# test
+#if sys.platform == 'win32':
+# colors_lst['USE'] = True
+
+def get_color(cl):
+ if not colors_lst['USE']: return ''
+ return colors_lst.get(cl, '')
+
+class foo(object):
+ def __getattr__(self, a):
+ return get_color(a)
+ def __call__(self, a):
+ return get_color(a)
+
+colors = foo()
+
+re_log = re.compile(r'(\w+): (.*)', re.M)
+class log_filter(logging.Filter):
+ def __init__(self, name=None):
+ pass
+
+ def filter(self, rec):
+ rec.c1 = colors.PINK
+ rec.c2 = colors.NORMAL
+ rec.zone = rec.module
+ if rec.levelno >= logging.INFO:
+ if rec.levelno >= logging.ERROR:
+ rec.c1 = colors.RED
+ elif rec.levelno >= logging.WARNING:
+ rec.c1 = colors.YELLOW
+ else:
+ rec.c1 = colors.GREEN
+ return True
+
+ zone = ''
+ m = re_log.match(rec.msg)
+ if m:
+ zone = rec.zone = m.group(1)
+ rec.msg = m.group(2)
+
+ if zones:
+ return getattr(rec, 'zone', '') in zones or '*' in zones
+		elif verbose <= 2:
+ return False
+ return True
+
+class formatter(logging.Formatter):
+ def __init__(self):
+ logging.Formatter.__init__(self, LOG_FORMAT, HOUR_FORMAT)
+
+ def format(self, rec):
+ if rec.levelno >= logging.WARNING or rec.levelno == logging.INFO:
+ try:
+ return '%s%s%s' % (rec.c1, rec.msg.decode('utf-8'), rec.c2)
+ except:
+ return rec.c1+rec.msg+rec.c2
+ return logging.Formatter.format(self, rec)
+
+def debug(*k, **kw):
+ if verbose:
+ k = list(k)
+ k[0] = k[0].replace('\n', ' ')
+ logging.debug(*k, **kw)
+
+def error(*k, **kw):
+ logging.error(*k, **kw)
+ if verbose > 1:
+ if isinstance(k[0], Utils.WafError):
+ st = k[0].stack
+ else:
+ st = traceback.extract_stack()
+ if st:
+ st = st[:-1]
+ buf = []
+ for filename, lineno, name, line in st:
+ buf.append(' File "%s", line %d, in %s' % (filename, lineno, name))
+ if line:
+ buf.append(' %s' % line.strip())
+ if buf: logging.error("\n".join(buf))
+
+warn = logging.warn
+info = logging.info
+
+def init_log():
+ log = logging.getLogger()
+ log.handlers = []
+ log.filters = []
+ hdlr = logging.StreamHandler()
+ hdlr.setFormatter(formatter())
+ log.addHandler(hdlr)
+ log.addFilter(log_filter())
+ log.setLevel(logging.DEBUG)
+
+# may be initialized more than once
+init_log()
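+# Usage sketch: running 'waf --zones=runner' makes debug('runner: cmd %r', c)
+# visible; the zone is the '(\w+): ' prefix that re_log strips from the message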
diff --git a/third_party/waf/wafadmin/Node.py b/third_party/waf/wafadmin/Node.py
new file mode 100644
index 0000000..158a4a4
--- /dev/null
+++ b/third_party/waf/wafadmin/Node.py
@@ -0,0 +1,694 @@
+#!/usr/bin/env python
+# encoding: utf-8
+# Thomas Nagy, 2005 (ita)
+
+"""
+Node: filesystem structure, contains lists of nodes
+
+IMPORTANT:
+1. Each file/folder is represented by exactly one node.
+
+2. Most would-be class properties are stored in Build: nodes to depend on, signature, flags, ..
+unused class members increase the .wafpickle file size noticeably when there are many objects.
+
+3. The build is launched from the top of the build dir (for example, in _build_/).
+
+4. Node should not be instantiated directly.
+Each instance of Build.BuildContext has a Node subclass.
+(aka: 'Nodu', see BuildContext initializer)
+The BuildContext is referenced here as self.__class__.bld
+Its Node class is referenced here as self.__class__
+
+The public and advertised apis are the following:
+${TGT} -> dir/to/file.ext
+${TGT[0].base()} -> dir/to/file
+${TGT[0].dir(env)} -> dir/to
+${TGT[0].file()} -> file.ext
+${TGT[0].file_base()} -> file
+${TGT[0].suffix()} -> .ext
+${TGT[0].abspath(env)} -> /path/to/dir/to/file.ext
+
+"""
+
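+# Worked example of the api above (assuming a node for src/dir/file.ext):
+#   node.file()           -> 'file.ext'
+#   node.file_base()      -> 'file'
+#   node.suffix()         -> '.ext'
+#   node.change_ext('.o') -> the build node for src/dir/file.o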
+import os, sys, fnmatch, re, stat
+import Utils, Constants
+
+UNDEFINED = 0
+DIR = 1
+FILE = 2
+BUILD = 3
+
+type_to_string = {UNDEFINED: "unk", DIR: "dir", FILE: "src", BUILD: "bld"}
+
+# These fnmatch expressions are used by default to prune the directory tree
+# while doing the recursive traversal in the find_iter method of the Node class.
+prune_pats = '.git .bzr .hg .svn _MTN _darcs CVS SCCS'.split()
+
+# These fnmatch expressions are used by default to exclude files and dirs
+# while doing the recursive traversal in the find_iter method of the Node class.
+exclude_pats = prune_pats + '*~ #*# .#* %*% ._* .gitignore .cvsignore vssver.scc .DS_Store'.split()
+
+# These Utils.jar_regexp expressions are used by default to exclude files and dirs and also prune the directory tree
+# while doing the recursive traversal in the ant_glob method of the Node class.
+exclude_regs = '''
+**/*~
+**/#*#
+**/.#*
+**/%*%
+**/._*
+**/CVS
+**/CVS/**
+**/.cvsignore
+**/SCCS
+**/SCCS/**
+**/vssver.scc
+**/.svn
+**/.svn/**
+**/.git
+**/.git/**
+**/.gitignore
+**/.bzr
+**/.bzr/**
+**/.hg
+**/.hg/**
+**/_MTN
+**/_MTN/**
+**/_darcs
+**/_darcs/**
+**/.DS_Store'''
+
+class Node(object):
+ __slots__ = ("name", "parent", "id", "childs")
+ def __init__(self, name, parent, node_type = UNDEFINED):
+ self.name = name
+ self.parent = parent
+
+ # assumption: one build object at a time
+ self.__class__.bld.id_nodes += 4
+ self.id = self.__class__.bld.id_nodes + node_type
+
+ if node_type == DIR: self.childs = {}
+
+ # We do not want to add another type attribute (memory)
+ # use the id to find out: type = id & 3
+		# for setting: new id = id + new_type - (id & 3)
+
+ if parent and name in parent.childs:
+ raise Utils.WafError('node %s exists in the parent files %r already' % (name, parent))
+
+ if parent: parent.childs[name] = self
+
+ def __setstate__(self, data):
+ if len(data) == 4:
+ (self.parent, self.name, self.id, self.childs) = data
+ else:
+ (self.parent, self.name, self.id) = data
+
+ def __getstate__(self):
+ if getattr(self, 'childs', None) is None:
+ return (self.parent, self.name, self.id)
+ else:
+ return (self.parent, self.name, self.id, self.childs)
+
+ def __str__(self):
+ if not self.parent: return ''
+ return "%s://%s" % (type_to_string[self.id & 3], self.abspath())
+
+ def __repr__(self):
+ return self.__str__()
+
+ def __hash__(self):
+ "expensive, make certain it is not used"
+ raise Utils.WafError('nodes, you are doing it wrong')
+
+ def __copy__(self):
+ "nodes are not supposed to be copied"
+ raise Utils.WafError('nodes are not supposed to be cloned')
+
+ def get_type(self):
+ return self.id & 3
+
+ def set_type(self, t):
+ "dangerous, you are not supposed to use this"
+		self.id = self.id + t - (self.id & 3)
+
+ def dirs(self):
+ return [x for x in self.childs.values() if x.id & 3 == DIR]
+
+ def files(self):
+ return [x for x in self.childs.values() if x.id & 3 == FILE]
+
+ def get_dir(self, name, default=None):
+ node = self.childs.get(name, None)
+ if not node or node.id & 3 != DIR: return default
+ return node
+
+ def get_file(self, name, default=None):
+ node = self.childs.get(name, None)
+ if not node or node.id & 3 != FILE: return default
+ return node
+
+ def get_build(self, name, default=None):
+ node = self.childs.get(name, None)
+ if not node or node.id & 3 != BUILD: return default
+ return node
+
+ def find_resource(self, lst):
+ "Find an existing input file: either a build node declared previously or a source node"
+ if isinstance(lst, str):
+ lst = Utils.split_path(lst)
+
+ if len(lst) == 1:
+ parent = self
+ else:
+ parent = self.find_dir(lst[:-1])
+ if not parent: return None
+ self.__class__.bld.rescan(parent)
+
+ name = lst[-1]
+ node = parent.childs.get(name, None)
+ if node:
+ tp = node.id & 3
+ if tp == FILE or tp == BUILD:
+ return node
+ else:
+ return None
+
+ tree = self.__class__.bld
+ if not name in tree.cache_dir_contents[parent.id]:
+ return None
+
+ path = parent.abspath() + os.sep + name
+ try:
+ st = Utils.h_file(path)
+ except IOError:
+ return None
+
+ child = self.__class__(name, parent, FILE)
+ tree.node_sigs[0][child.id] = st
+ return child
+
+ def find_or_declare(self, lst):
+ "Used for declaring a build node representing a file being built"
+ if isinstance(lst, str):
+ lst = Utils.split_path(lst)
+
+ if len(lst) == 1:
+ parent = self
+ else:
+ parent = self.find_dir(lst[:-1])
+ if not parent: return None
+ self.__class__.bld.rescan(parent)
+
+ name = lst[-1]
+ node = parent.childs.get(name, None)
+ if node:
+ tp = node.id & 3
+ if tp != BUILD:
+ raise Utils.WafError('find_or_declare found a source file where a build file was expected %r' % '/'.join(lst))
+ return node
+ node = self.__class__(name, parent, BUILD)
+ return node
+
+ def find_dir(self, lst):
+ "search a folder in the filesystem"
+
+ if isinstance(lst, str):
+ lst = Utils.split_path(lst)
+
+ current = self
+ for name in lst:
+ self.__class__.bld.rescan(current)
+ prev = current
+
+ if not current.parent and name == current.name:
+ continue
+ elif not name:
+ continue
+ elif name == '.':
+ continue
+ elif name == '..':
+ current = current.parent or current
+ else:
+ current = prev.childs.get(name, None)
+ if current is None:
+ dir_cont = self.__class__.bld.cache_dir_contents
+ if prev.id in dir_cont and name in dir_cont[prev.id]:
+ if not prev.name:
+ if os.sep == '/':
+ # cygwin //machine/share
+ dirname = os.sep + name
+ else:
+ # windows c:
+ dirname = name
+ else:
+ # regular path
+ dirname = prev.abspath() + os.sep + name
+ if not os.path.isdir(dirname):
+ return None
+ current = self.__class__(name, prev, DIR)
+ elif (not prev.name and len(name) == 2 and name[1] == ':') or name.startswith('\\\\'):
+ # drive letter or \\ path for windows
+ current = self.__class__(name, prev, DIR)
+ else:
+ return None
+ else:
+ if current.id & 3 != DIR:
+ return None
+ return current
+
+ def ensure_dir_node_from_path(self, lst):
+		"used very rarely; forces the construction of a branch of node instances representing folders"
+
+ if isinstance(lst, str):
+ lst = Utils.split_path(lst)
+
+ current = self
+ for name in lst:
+ if not name:
+ continue
+ elif name == '.':
+ continue
+ elif name == '..':
+ current = current.parent or current
+ else:
+ prev = current
+ current = prev.childs.get(name, None)
+ if current is None:
+ current = self.__class__(name, prev, DIR)
+ return current
+
+ def exclusive_build_node(self, path):
+ """
+ create a hierarchy in the build dir (no source folders) for ill-behaving compilers
+ the node is not hashed, so you must do it manually
+
+ after declaring such a node, find_dir and find_resource should work as expected
+ """
+ lst = Utils.split_path(path)
+ name = lst[-1]
+ if len(lst) > 1:
+ parent = None
+ try:
+ parent = self.find_dir(lst[:-1])
+ except OSError:
+ pass
+ if not parent:
+ parent = self.ensure_dir_node_from_path(lst[:-1])
+ self.__class__.bld.rescan(parent)
+ else:
+ try:
+ self.__class__.bld.rescan(parent)
+ except OSError:
+ pass
+ else:
+ parent = self
+
+ node = parent.childs.get(name, None)
+ if not node:
+ node = self.__class__(name, parent, BUILD)
+
+ return node
+
+ def path_to_parent(self, parent):
+ "path relative to a direct ancestor, as string"
+ lst = []
+ p = self
+ h1 = parent.height()
+ h2 = p.height()
+ while h2 > h1:
+ h2 -= 1
+ lst.append(p.name)
+ p = p.parent
+ if lst:
+ lst.reverse()
+ ret = os.path.join(*lst)
+ else:
+ ret = ''
+ return ret
+
+ def find_ancestor(self, node):
+ "find a common ancestor for two nodes - for the shortest path in hierarchy"
+ dist = self.height() - node.height()
+ if dist < 0: return node.find_ancestor(self)
+ # now the real code
+ cand = self
+ while dist > 0:
+ cand = cand.parent
+ dist -= 1
+ if cand == node: return cand
+ cursor = node
+ while cand.parent:
+ cand = cand.parent
+ cursor = cursor.parent
+ if cand == cursor: return cand
+
+ def relpath_gen(self, from_node):
+		"string representing a relative path between self and another node"
+
+ if self == from_node: return '.'
+ if from_node.parent == self: return '..'
+
+ # up_path is '../../../' and down_path is 'dir/subdir/subdir/file'
+ ancestor = self.find_ancestor(from_node)
+ lst = []
+ cand = self
+ while not cand.id == ancestor.id:
+ lst.append(cand.name)
+ cand = cand.parent
+ cand = from_node
+ while not cand.id == ancestor.id:
+ lst.append('..')
+ cand = cand.parent
+ lst.reverse()
+ return os.sep.join(lst)
+
+ def nice_path(self, env=None):
+		"path as printed to the console, so files can be opened easily from the launch directory"
+ tree = self.__class__.bld
+ ln = tree.launch_node()
+
+ if self.id & 3 == FILE: return self.relpath_gen(ln)
+ else: return os.path.join(tree.bldnode.relpath_gen(ln), env.variant(), self.relpath_gen(tree.srcnode))
+
+ def is_child_of(self, node):
+		"does this node belong to the subtree rooted at node?"
+ p = self
+ diff = self.height() - node.height()
+ while diff > 0:
+ diff -= 1
+ p = p.parent
+ return p.id == node.id
+
+ def variant(self, env):
+		"variant (output directory) for this node; a source node has variant 0"
+ if not env: return 0
+ elif self.id & 3 == FILE: return 0
+ else: return env.variant()
+
+ def height(self):
+		"number of parents"
+ # README a cache can be added here if necessary
+ d = self
+ val = -1
+ while d:
+ d = d.parent
+ val += 1
+ return val
+
+ # helpers for building things
+
+ def abspath(self, env=None):
+ """
+ absolute path
+ @param env [Environment]:
+ * obligatory for build nodes: build/variant/src/dir/bar.o
+ * optional for dirs: get either src/dir or build/variant/src/dir
+ * excluded for source nodes: src/dir/bar.c
+
+ Instead of computing the absolute path each time again,
+ store the already-computed absolute paths in one of (variants+1) dictionaries:
+ bld.cache_node_abspath[0] holds absolute paths for source nodes.
+ bld.cache_node_abspath[variant] holds the absolute path for the build nodes
+ which reside in the variant given by env.
+ """
+ ## absolute path - hot zone, so do not touch
+
+ # less expensive
+ variant = (env and (self.id & 3 != FILE) and env.variant()) or 0
+
+ ret = self.__class__.bld.cache_node_abspath[variant].get(self.id, None)
+ if ret: return ret
+
+ if not variant:
+ # source directory
+ if not self.parent:
+ val = os.sep == '/' and os.sep or ''
+ elif not self.parent.name: # root
+ val = (os.sep == '/' and os.sep or '') + self.name
+ else:
+ val = self.parent.abspath() + os.sep + self.name
+ else:
+ # build directory
+ val = os.sep.join((self.__class__.bld.bldnode.abspath(), variant, self.path_to_parent(self.__class__.bld.srcnode)))
+ self.__class__.bld.cache_node_abspath[variant][self.id] = val
+ return val
+
+ def change_ext(self, ext):
+ "node of the same path, but with a different extension - hot zone so do not touch"
+ name = self.name
+ k = name.rfind('.')
+ if k >= 0:
+ name = name[:k] + ext
+ else:
+ name = name + ext
+
+ return self.parent.find_or_declare([name])
+
+ def src_dir(self, env):
+ "src path without the file name"
+ return self.parent.srcpath(env)
+
+ def bld_dir(self, env):
+ "build path without the file name"
+ return self.parent.bldpath(env)
+
+ def bld_base(self, env):
+ "build path without the extension: src/dir/foo(.cpp)"
+ s = os.path.splitext(self.name)[0]
+ return os.path.join(self.bld_dir(env), s)
+
+ def bldpath(self, env=None):
+ "path seen from the build dir default/src/foo.cpp"
+ if self.id & 3 == FILE:
+ return self.relpath_gen(self.__class__.bld.bldnode)
+ p = self.path_to_parent(self.__class__.bld.srcnode)
+		if p != '':
+ return env.variant() + os.sep + p
+ return env.variant()
+
+ def srcpath(self, env=None):
+ "path in the srcdir from the build dir ../src/foo.cpp"
+ if self.id & 3 == BUILD:
+ return self.bldpath(env)
+ return self.relpath_gen(self.__class__.bld.bldnode)
+
+ def read(self, env):
+		"get the contents of a file; it is not used anywhere for the moment"
+ return Utils.readf(self.abspath(env))
+
+ def dir(self, env):
+ "scons-like"
+ return self.parent.abspath(env)
+
+ def file(self):
+ "scons-like"
+ return self.name
+
+ def file_base(self):
+ "scons-like"
+ return os.path.splitext(self.name)[0]
+
+ def suffix(self):
+ "scons-like - hot zone so do not touch"
+ k = max(0, self.name.rfind('.'))
+ return self.name[k:]
+
+ def find_iter_impl(self, src=True, bld=True, dir=True, accept_name=None, is_prune=None, maxdepth=25):
+		"""find nodes in the filesystem hierarchy, try to instantiate the nodes passively; same gotcha as ant_glob"""
+ bld_ctx = self.__class__.bld
+ bld_ctx.rescan(self)
+ for name in bld_ctx.cache_dir_contents[self.id]:
+ if accept_name(self, name):
+ node = self.find_resource(name)
+ if node:
+ if src and node.id & 3 == FILE:
+ yield node
+ else:
+ node = self.find_dir(name)
+ if node and node.id != bld_ctx.bldnode.id:
+ if dir:
+ yield node
+ if not is_prune(self, name):
+ if maxdepth:
+ for k in node.find_iter_impl(src, bld, dir, accept_name, is_prune, maxdepth=maxdepth - 1):
+ yield k
+ else:
+ if not is_prune(self, name):
+ node = self.find_resource(name)
+ if not node:
+ # not a file, it is a dir
+ node = self.find_dir(name)
+ if node and node.id != bld_ctx.bldnode.id:
+ if maxdepth:
+ for k in node.find_iter_impl(src, bld, dir, accept_name, is_prune, maxdepth=maxdepth - 1):
+ yield k
+
+ if bld:
+ for node in self.childs.values():
+ if node.id == bld_ctx.bldnode.id:
+ continue
+ if node.id & 3 == BUILD:
+ if accept_name(self, node.name):
+ yield node
+ raise StopIteration
+
+ def find_iter(self, in_pat=['*'], ex_pat=exclude_pats, prune_pat=prune_pats, src=True, bld=True, dir=False, maxdepth=25, flat=False):
+ """find nodes recursively, this returns everything but folders by default; same gotcha as ant_glob"""
+
+ if not (src or bld or dir):
+ raise StopIteration
+
+ if self.id & 3 != DIR:
+ raise StopIteration
+
+ in_pat = Utils.to_list(in_pat)
+ ex_pat = Utils.to_list(ex_pat)
+ prune_pat = Utils.to_list(prune_pat)
+
+ def accept_name(node, name):
+ for pat in ex_pat:
+ if fnmatch.fnmatchcase(name, pat):
+ return False
+ for pat in in_pat:
+ if fnmatch.fnmatchcase(name, pat):
+ return True
+ return False
+
+ def is_prune(node, name):
+ for pat in prune_pat:
+ if fnmatch.fnmatchcase(name, pat):
+ return True
+ return False
+
+ ret = self.find_iter_impl(src, bld, dir, accept_name, is_prune, maxdepth=maxdepth)
+ if flat:
+ return " ".join([x.relpath_gen(self) for x in ret])
+
+ return ret
+
+ def ant_glob(self, *k, **kw):
+ """
+ known gotcha: will enumerate the files, but only if the folder exists in the source directory
+ """
+
+ src=kw.get('src', 1)
+ bld=kw.get('bld', 0)
+ dir=kw.get('dir', 0)
+ excl = kw.get('excl', exclude_regs)
+ incl = k and k[0] or kw.get('incl', '**')
+
+ def to_pat(s):
+ lst = Utils.to_list(s)
+ ret = []
+ for x in lst:
+ x = x.replace('//', '/')
+ if x.endswith('/'):
+ x += '**'
+ lst2 = x.split('/')
+ accu = []
+ for k in lst2:
+ if k == '**':
+ accu.append(k)
+ else:
+ k = k.replace('.', '[.]').replace('*', '.*').replace('?', '.')
+ k = '^%s$' % k
+ #print "pattern", k
+ accu.append(re.compile(k))
+ ret.append(accu)
+ return ret
+
+ def filtre(name, nn):
+ ret = []
+ for lst in nn:
+ if not lst:
+ pass
+ elif lst[0] == '**':
+ ret.append(lst)
+ if len(lst) > 1:
+ if lst[1].match(name):
+ ret.append(lst[2:])
+ else:
+ ret.append([])
+ elif lst[0].match(name):
+ ret.append(lst[1:])
+ return ret
+
+ def accept(name, pats):
+ nacc = filtre(name, pats[0])
+ nrej = filtre(name, pats[1])
+ if [] in nrej:
+ nacc = []
+ return [nacc, nrej]
+
+ def ant_iter(nodi, maxdepth=25, pats=[]):
+ nodi.__class__.bld.rescan(nodi)
+ tmp = list(nodi.__class__.bld.cache_dir_contents[nodi.id])
+ tmp.sort()
+ for name in tmp:
+ npats = accept(name, pats)
+ if npats and npats[0]:
+ accepted = [] in npats[0]
+ #print accepted, nodi, name
+
+ node = nodi.find_resource(name)
+ if node and accepted:
+ if src and node.id & 3 == FILE:
+ yield node
+ else:
+ node = nodi.find_dir(name)
+ if node and node.id != nodi.__class__.bld.bldnode.id:
+ if accepted and dir:
+ yield node
+ if maxdepth:
+ for k in ant_iter(node, maxdepth=maxdepth - 1, pats=npats):
+ yield k
+ if bld:
+ for node in nodi.childs.values():
+ if node.id == nodi.__class__.bld.bldnode.id:
+ continue
+ if node.id & 3 == BUILD:
+ npats = accept(node.name, pats)
+ if npats and npats[0] and [] in npats[0]:
+ yield node
+ raise StopIteration
+
+ ret = [x for x in ant_iter(self, pats=[to_pat(incl), to_pat(excl)])]
+
+ if kw.get('flat', True):
+ return " ".join([x.relpath_gen(self) for x in ret])
+
+ return ret
+
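+	# Hedged usage sketch (pattern and folder names invented):
+	#
+	#   srcs = bld.path.ant_glob('src/**/*.c')              # space-separated string (flat=True)
+	#   nodes = bld.path.ant_glob('src/**/*.c', flat=False) # list of nodes instead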
+ def update_build_dir(self, env=None):
+
+ if not env:
+ for env in self.bld.all_envs:
+ self.update_build_dir(env)
+ return
+
+ path = self.abspath(env)
+
+ lst = Utils.listdir(path)
+ try:
+ self.__class__.bld.cache_dir_contents[self.id].update(lst)
+ except KeyError:
+ self.__class__.bld.cache_dir_contents[self.id] = set(lst)
+ self.__class__.bld.cache_scanned_folders[self.id] = True
+
+ for k in lst:
+ npath = path + os.sep + k
+ st = os.stat(npath)
+ if stat.S_ISREG(st[stat.ST_MODE]):
+ ick = self.find_or_declare(k)
+ if not (ick.id in self.__class__.bld.node_sigs[env.variant()]):
+ self.__class__.bld.node_sigs[env.variant()][ick.id] = Constants.SIG_NIL
+ elif stat.S_ISDIR(st[stat.ST_MODE]):
+ child = self.find_dir(k)
+ if not child:
+ child = self.ensure_dir_node_from_path(k)
+ child.update_build_dir(env)
+
+
+class Nodu(Node):
+ pass
diff --git a/third_party/waf/wafadmin/Options.py b/third_party/waf/wafadmin/Options.py
new file mode 100644
index 0000000..7e87c11
--- /dev/null
+++ b/third_party/waf/wafadmin/Options.py
@@ -0,0 +1,287 @@
+#!/usr/bin/env python
+# encoding: utf-8
+# Scott Newton, 2005 (scottn)
+# Thomas Nagy, 2006 (ita)
+
+"Custom command-line options"
+
+import os, sys, imp, types, tempfile, optparse
+import Logs, Utils
+from Constants import *
+
+cmds = 'distclean configure build install clean uninstall check dist distcheck'.split()
+
+# TODO remove in waf 1.6 the following two
+commands = {}
+is_install = False
+
+options = {}
+arg_line = []
+launch_dir = ''
+tooldir = ''
+lockfile = os.environ.get('WAFLOCK', '.lock-wscript')
+try: cache_global = os.path.abspath(os.environ['WAFCACHE'])
+except KeyError: cache_global = ''
+platform = Utils.unversioned_sys_platform()
+conf_file = 'conf-runs-%s-%d.pickle' % (platform, ABI)
+
+remote_repo = ['http://waf.googlecode.com/svn/']
+"""remote directory for the plugins"""
+
+
+# Such a command-line should work: JOBS=4 PREFIX=/opt/ DESTDIR=/tmp/ahoj/ waf configure
+default_prefix = os.environ.get('PREFIX')
+if not default_prefix:
+ if platform == 'win32':
+ d = tempfile.gettempdir()
+ default_prefix = d[0].upper() + d[1:]
+ # win32 preserves the case, but gettempdir does not
+ else: default_prefix = '/usr/local/'
+
+try: default_jobs = int(os.environ.get('JOBS', -1))
+except ValueError: default_jobs = -1
+if default_jobs < 1:
+ try:
+ if 'SC_NPROCESSORS_ONLN' in os.sysconf_names:
+ default_jobs = os.sysconf('SC_NPROCESSORS_ONLN')
+ else:
+ default_jobs = int(Utils.cmd_output(['sysctl', '-n', 'hw.ncpu']))
+ except:
+ if os.name == 'java': # platform.system() == 'Java'
+ from java.lang import Runtime
+ default_jobs = Runtime.getRuntime().availableProcessors()
+ else:
+ # environment var defined on win32
+ default_jobs = int(os.environ.get('NUMBER_OF_PROCESSORS', 1))
+
+default_destdir = os.environ.get('DESTDIR', '')
+
+def get_usage(self):
+ cmds_str = []
+ module = Utils.g_module
+ if module:
+ # create the help messages for commands
+ tbl = module.__dict__
+ keys = list(tbl.keys())
+ keys.sort()
+
+ if 'build' in tbl:
+ if not module.build.__doc__:
+ module.build.__doc__ = 'builds the project'
+ if 'configure' in tbl:
+ if not module.configure.__doc__:
+ module.configure.__doc__ = 'configures the project'
+
+ ban = ['set_options', 'init', 'shutdown']
+
+ optlst = [x for x in keys if not x in ban
+ and type(tbl[x]) is type(parse_args_impl)
+ and tbl[x].__doc__
+ and not x.startswith('_')]
+
+ just = max([len(x) for x in optlst])
+
+ for x in optlst:
+ cmds_str.append(' %s: %s' % (x.ljust(just), tbl[x].__doc__))
+ ret = '\n'.join(cmds_str)
+ else:
+ ret = ' '.join(cmds)
+ return '''waf [command] [options]
+
+Main commands (example: ./waf build -j4)
+%s
+''' % ret
+
+
+setattr(optparse.OptionParser, 'get_usage', get_usage)
+
+def create_parser(module=None):
+ Logs.debug('options: create_parser is called')
+ parser = optparse.OptionParser(conflict_handler="resolve", version = 'waf %s (%s)' % (WAFVERSION, WAFREVISION))
+
+ parser.formatter.width = Utils.get_term_cols()
+ p = parser.add_option
+
+ p('-j', '--jobs',
+ type = 'int',
+ default = default_jobs,
+ help = 'amount of parallel jobs (%r)' % default_jobs,
+ dest = 'jobs')
+
+ p('-k', '--keep',
+ action = 'store_true',
+ default = False,
+ help = 'keep running happily on independent task groups',
+ dest = 'keep')
+
+ p('-v', '--verbose',
+ action = 'count',
+ default = 0,
+ help = 'verbosity level -v -vv or -vvv [default: 0]',
+ dest = 'verbose')
+
+ p('--nocache',
+ action = 'store_true',
+ default = False,
+ help = 'ignore the WAFCACHE (if set)',
+ dest = 'nocache')
+
+ p('--zones',
+ action = 'store',
+ default = '',
+ help = 'debugging zones (task_gen, deps, tasks, etc)',
+ dest = 'zones')
+
+ p('-p', '--progress',
+ action = 'count',
+ default = 0,
+ help = '-p: progress bar; -pp: ide output',
+ dest = 'progress_bar')
+
+ p('--targets',
+ action = 'store',
+ default = '',
+ help = 'build given task generators, e.g. "target1,target2"',
+ dest = 'compile_targets')
+
+ gr = optparse.OptionGroup(parser, 'configuration options')
+ parser.add_option_group(gr)
+ gr.add_option('-b', '--blddir',
+ action = 'store',
+ default = '',
+ help = 'out dir for the project (configuration)',
+ dest = 'blddir')
+ gr.add_option('-s', '--srcdir',
+ action = 'store',
+ default = '',
+ help = 'top dir for the project (configuration)',
+ dest = 'srcdir')
+ gr.add_option('--prefix',
+ help = 'installation prefix (configuration) [default: %r]' % default_prefix,
+ default = default_prefix,
+ dest = 'prefix')
+
+ gr.add_option('--download',
+ action = 'store_true',
+ default = False,
+ help = 'try to download the tools if missing',
+ dest = 'download')
+
+ gr = optparse.OptionGroup(parser, 'installation options')
+ parser.add_option_group(gr)
+ gr.add_option('--destdir',
+ help = 'installation root [default: %r]' % default_destdir,
+ default = default_destdir,
+ dest = 'destdir')
+ gr.add_option('-f', '--force',
+ action = 'store_true',
+ default = False,
+ help = 'force file installation',
+ dest = 'force')
+
+ return parser
+
+def parse_args_impl(parser, _args=None):
+ global options, commands, arg_line
+ (options, args) = parser.parse_args(args=_args)
+
+ arg_line = args
+ #arg_line = args[:] # copy
+
+ # By default, 'waf' is equivalent to 'waf build'
+ commands = {}
+ for var in cmds: commands[var] = 0
+ if not args:
+ commands['build'] = 1
+ args.append('build')
+
+ # Parse the command arguments
+ for arg in args:
+ commands[arg] = True
+
+	# the 'check' command requires a prior 'build'
+ if 'check' in args:
+ idx = args.index('check')
+ try:
+ bidx = args.index('build')
+ if bidx > idx:
+ raise ValueError('build before check')
+ except ValueError, e:
+ args.insert(idx, 'build')
+
+ if args[0] != 'init':
+ args.insert(0, 'init')
+
+ # TODO -k => -j0
+ if options.keep: options.jobs = 1
+ if options.jobs < 1: options.jobs = 1
+
+ if 'install' in sys.argv or 'uninstall' in sys.argv:
+ # absolute path only if set
+ options.destdir = options.destdir and os.path.abspath(os.path.expanduser(options.destdir))
+
+ Logs.verbose = options.verbose
+ Logs.init_log()
+
+ if options.zones:
+ Logs.zones = options.zones.split(',')
+ if not Logs.verbose: Logs.verbose = 1
+ elif Logs.verbose > 0:
+ Logs.zones = ['runner']
+ if Logs.verbose > 2:
+ Logs.zones = ['*']
+
+# TODO waf 1.6
+# 1. rename the class to OptionsContext
+# 2. instead of a class attribute, use a module (static 'parser')
+# 3. parse_args_impl was made in times when we did not know about binding new methods to classes
+
+class Handler(Utils.Context):
+ """loads wscript modules in folders for adding options
+ This class should be named 'OptionsContext'
+ A method named 'recurse' is bound when used by the module Scripting"""
+
+ parser = None
+ # make it possible to access the reference, like Build.bld
+
+ def __init__(self, module=None):
+ self.parser = create_parser(module)
+ self.cwd = os.getcwd()
+ Handler.parser = self
+
+ def add_option(self, *k, **kw):
+ self.parser.add_option(*k, **kw)
+
+ def add_option_group(self, *k, **kw):
+ return self.parser.add_option_group(*k, **kw)
+
+ def get_option_group(self, opt_str):
+ return self.parser.get_option_group(opt_str)
+
+ def sub_options(self, *k, **kw):
+ if not k: raise Utils.WscriptError('folder expected')
+ self.recurse(k[0], name='set_options')
+
+ def tool_options(self, *k, **kw):
+ Utils.python_24_guard()
+
+ if not k[0]:
+ raise Utils.WscriptError('invalid tool_options call %r %r' % (k, kw))
+ tools = Utils.to_list(k[0])
+
+ # TODO waf 1.6 remove the global variable tooldir
+ path = Utils.to_list(kw.get('tdir', kw.get('tooldir', tooldir)))
+
+ for tool in tools:
+ tool = tool.replace('++', 'xx')
+ if tool == 'java': tool = 'javaw'
+ if tool.lower() == 'unittest': tool = 'unittestw'
+ module = Utils.load_tool(tool, path)
+ try:
+ fun = module.set_options
+ except AttributeError:
+ pass
+ else:
+ fun(kw.get('option_group', self))
+
+ def parse_args(self, args=None):
+ parse_args_impl(self.parser, args)
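+	# Hedged sketch: a wscript set_options hook driven through this class
+	# (the option name is invented):
+	#
+	#   def set_options(opt):
+	#       opt.add_option('--with-foo', action='store_true', default=False, dest='foo')
+	#       opt.sub_options('src')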
diff --git a/third_party/waf/wafadmin/Runner.py b/third_party/waf/wafadmin/Runner.py
new file mode 100644
index 0000000..0d2dea5
--- /dev/null
+++ b/third_party/waf/wafadmin/Runner.py
@@ -0,0 +1,235 @@
+#!/usr/bin/env python
+# encoding: utf-8
+# Thomas Nagy, 2005-2008 (ita)
+
+"Execute the tasks"
+
+import os, sys, random, time, threading, traceback
+try: from Queue import Queue
+except ImportError: from queue import Queue
+import Build, Utils, Logs, Options
+from Logs import debug, error
+from Constants import *
+
+GAP = 15
+
+run_old = threading.Thread.run
+def run(*args, **kwargs):
+ try:
+ run_old(*args, **kwargs)
+ except (KeyboardInterrupt, SystemExit):
+ raise
+ except:
+ sys.excepthook(*sys.exc_info())
+threading.Thread.run = run
+
+def process_task(tsk):
+
+ m = tsk.master
+ if m.stop:
+ m.out.put(tsk)
+ return
+
+ try:
+ tsk.generator.bld.printout(tsk.display())
+ if tsk.__class__.stat: ret = tsk.__class__.stat(tsk)
+ # actual call to task's run() function
+ else: ret = tsk.call_run()
+ except Exception, e:
+ tsk.err_msg = Utils.ex_stack()
+ tsk.hasrun = EXCEPTION
+
+ # TODO cleanup
+ m.error_handler(tsk)
+ m.out.put(tsk)
+ return
+
+ if ret:
+ tsk.err_code = ret
+ tsk.hasrun = CRASHED
+ else:
+ try:
+ tsk.post_run()
+ except Utils.WafError:
+ pass
+ except Exception:
+ tsk.err_msg = Utils.ex_stack()
+ tsk.hasrun = EXCEPTION
+ else:
+ tsk.hasrun = SUCCESS
+ if tsk.hasrun != SUCCESS:
+ m.error_handler(tsk)
+
+ m.out.put(tsk)
+
+class TaskConsumer(threading.Thread):
+ ready = Queue(0)
+ consumers = []
+
+ def __init__(self):
+ threading.Thread.__init__(self)
+ self.setDaemon(1)
+ self.start()
+
+ def run(self):
+ try:
+ self.loop()
+ except:
+ pass
+
+ def loop(self):
+ while 1:
+ tsk = TaskConsumer.ready.get()
+ process_task(tsk)
+
+class Parallel(object):
+ """
+ keep the consumer threads busy, and avoid consuming cpu cycles
+ when no more tasks can be added (end of the build, etc)
+ """
+ def __init__(self, bld, j=2):
+
+ # number of consumers
+ self.numjobs = j
+
+ self.manager = bld.task_manager
+ self.manager.current_group = 0
+
+ self.total = self.manager.total()
+
+ # tasks waiting to be processed - IMPORTANT
+ self.outstanding = []
+ self.maxjobs = MAXJOBS
+
+		# tasks that are waiting for another task to complete
+ self.frozen = []
+
+ # tasks returned by the consumers
+ self.out = Queue(0)
+
+ self.count = 0 # tasks not in the producer area
+
+ self.processed = 1 # progress indicator
+
+ self.stop = False # error condition to stop the build
+ self.error = False # error flag
+
+ def get_next(self):
+ "override this method to schedule the tasks in a particular order"
+ if not self.outstanding:
+ return None
+ return self.outstanding.pop(0)
+
+ def postpone(self, tsk):
+ "override this method to schedule the tasks in a particular order"
+ # TODO consider using a deque instead
+ if random.randint(0, 1):
+ self.frozen.insert(0, tsk)
+ else:
+ self.frozen.append(tsk)
+
+ def refill_task_list(self):
+ "called to set the next group of tasks"
+
+ while self.count > self.numjobs + GAP or self.count >= self.maxjobs:
+ self.get_out()
+
+ while not self.outstanding:
+ if self.count:
+ self.get_out()
+
+ if self.frozen:
+ self.outstanding += self.frozen
+ self.frozen = []
+ elif not self.count:
+ (jobs, tmp) = self.manager.get_next_set()
+ if jobs != None: self.maxjobs = jobs
+ if tmp: self.outstanding += tmp
+ break
+
+ def get_out(self):
+		"the tasks that were put to execute are collected back using get_out"
+ ret = self.out.get()
+ self.manager.add_finished(ret)
+ if not self.stop and getattr(ret, 'more_tasks', None):
+ self.outstanding += ret.more_tasks
+ self.total += len(ret.more_tasks)
+ self.count -= 1
+
+ def error_handler(self, tsk):
+ "by default, errors make the build stop (not thread safe so be careful)"
+ if not Options.options.keep:
+ self.stop = True
+ self.error = True
+
+ def start(self):
+ "execute the tasks"
+
+ if TaskConsumer.consumers:
+ # the worker pool is usually loaded lazily (see below)
+ # in case it is re-used with a different value of numjobs:
+ while len(TaskConsumer.consumers) < self.numjobs:
+ TaskConsumer.consumers.append(TaskConsumer())
+
+ while not self.stop:
+
+ self.refill_task_list()
+
+ # consider the next task
+ tsk = self.get_next()
+ if not tsk:
+ if self.count:
+ # tasks may add new ones after they are run
+ continue
+ else:
+ # no tasks to run, no tasks running, time to exit
+ break
+
+ if tsk.hasrun:
+ # if the task is marked as "run", just skip it
+ self.processed += 1
+ self.manager.add_finished(tsk)
+ continue
+
+ try:
+ st = tsk.runnable_status()
+ except Exception, e:
+ self.processed += 1
+ if self.stop and not Options.options.keep:
+ tsk.hasrun = SKIPPED
+ self.manager.add_finished(tsk)
+ continue
+ self.error_handler(tsk)
+ self.manager.add_finished(tsk)
+ tsk.hasrun = EXCEPTION
+ tsk.err_msg = Utils.ex_stack()
+ continue
+
+ if st == ASK_LATER:
+ self.postpone(tsk)
+ elif st == SKIP_ME:
+ self.processed += 1
+ tsk.hasrun = SKIPPED
+ self.manager.add_finished(tsk)
+ else:
+ # run me: put the task in ready queue
+ tsk.position = (self.processed, self.total)
+ self.count += 1
+ tsk.master = self
+ self.processed += 1
+
+ if self.numjobs == 1:
+ process_task(tsk)
+ else:
+ TaskConsumer.ready.put(tsk)
+ # create the consumer threads only if there is something to consume
+ if not TaskConsumer.consumers:
+ TaskConsumer.consumers = [TaskConsumer() for i in xrange(self.numjobs)]
+
+ # self.count represents the tasks that have been made available to the consumer threads
+ # collect all the tasks after an error else the message may be incomplete
+ while self.error and self.count:
+ self.get_out()
+
+ #print loop
+ assert (self.count == 0 or self.stop)
diff --git a/third_party/waf/wafadmin/Scripting.py b/third_party/waf/wafadmin/Scripting.py
new file mode 100644
index 0000000..6f61104
--- /dev/null
+++ b/third_party/waf/wafadmin/Scripting.py
@@ -0,0 +1,585 @@
+#!/usr/bin/env python
+# encoding: utf-8
+# Thomas Nagy, 2005 (ita)
+
+"Module called for configuring, compiling and installing targets"
+
+import os, sys, shutil, traceback, datetime, inspect, errno
+
+import Utils, Configure, Build, Logs, Options, Environment, Task
+from Logs import error, warn, info
+from Constants import *
+
+g_gz = 'bz2'
+commands = []
+
+def prepare_impl(t, cwd, ver, wafdir):
+ Options.tooldir = [t]
+ Options.launch_dir = cwd
+
+ # some command-line options can be processed immediately
+ if '--version' in sys.argv:
+ opt_obj = Options.Handler()
+ opt_obj.curdir = cwd
+ opt_obj.parse_args()
+ sys.exit(0)
+
+ # now find the wscript file
+ msg1 = 'Waf: Please run waf from a directory containing a file named "%s" or run distclean' % WSCRIPT_FILE
+
+ # in theory projects can be configured in an autotool-like manner:
+ # mkdir build && cd build && ../waf configure && ../waf
+ build_dir_override = None
+ candidate = None
+
+ lst = os.listdir(cwd)
+
+ search_for_candidate = True
+ if WSCRIPT_FILE in lst:
+ candidate = cwd
+
+ elif 'configure' in sys.argv and not WSCRIPT_BUILD_FILE in lst:
+ # autotool-like configuration
+ calldir = os.path.abspath(os.path.dirname(sys.argv[0]))
+ if WSCRIPT_FILE in os.listdir(calldir):
+ candidate = calldir
+ search_for_candidate = False
+ else:
+ error('arg[0] directory does not contain a wscript file')
+ sys.exit(1)
+ build_dir_override = cwd
+
+ # climb up to find a script if it is not found
+ while search_for_candidate:
+ if len(cwd) <= 3:
+ break # stop at / or c:
+ dirlst = os.listdir(cwd)
+ if WSCRIPT_FILE in dirlst:
+ candidate = cwd
+ if 'configure' in sys.argv and candidate:
+ break
+ if Options.lockfile in dirlst:
+ env = Environment.Environment()
+ try:
+ env.load(os.path.join(cwd, Options.lockfile))
+ except:
+ error('could not load %r' % Options.lockfile)
+ try:
+ os.stat(env['cwd'])
+ except:
+ candidate = cwd
+ else:
+ candidate = env['cwd']
+ break
+ cwd = os.path.dirname(cwd) # climb up
+
+ if not candidate:
+ # check if the user only wanted to display the help
+ if '-h' in sys.argv or '--help' in sys.argv:
+ warn('No wscript file found: the help message may be incomplete')
+ opt_obj = Options.Handler()
+ opt_obj.curdir = cwd
+ opt_obj.parse_args()
+ else:
+ error(msg1)
+ sys.exit(0)
+
+ # We have found wscript, but there is no guarantee that it is valid
+ try:
+ os.chdir(candidate)
+ except OSError:
+ raise Utils.WafError("the folder %r is unreadable" % candidate)
+
+ # define the main module containing the functions init, shutdown, ..
+ Utils.set_main_module(os.path.join(candidate, WSCRIPT_FILE))
+
+ if build_dir_override:
+ d = getattr(Utils.g_module, BLDDIR, None)
+ if d:
+ # test if user has set the blddir in wscript.
+ msg = ' Overriding build directory %s with %s' % (d, build_dir_override)
+ warn(msg)
+ Utils.g_module.blddir = build_dir_override
+
+ # bind a few methods and classes by default
+
+ def set_def(obj, name=''):
+ n = name or obj.__name__
+ if not n in Utils.g_module.__dict__:
+ setattr(Utils.g_module, n, obj)
+
+ for k in [dist, distclean, distcheck, clean, install, uninstall]:
+ set_def(k)
+
+ set_def(Configure.ConfigurationContext, 'configure_context')
+
+ for k in ['build', 'clean', 'install', 'uninstall']:
+ set_def(Build.BuildContext, k + '_context')
+
+ # now parse the options from the user wscript file
+ opt_obj = Options.Handler(Utils.g_module)
+ opt_obj.curdir = candidate
+ try:
+ f = Utils.g_module.set_options
+ except AttributeError:
+ pass
+ else:
+ opt_obj.sub_options([''])
+ opt_obj.parse_args()
+
+ if not 'init' in Utils.g_module.__dict__:
+ Utils.g_module.init = Utils.nada
+ if not 'shutdown' in Utils.g_module.__dict__:
+ Utils.g_module.shutdown = Utils.nada
+
+ main()
+
+def prepare(t, cwd, ver, wafdir):
+ if WAFVERSION != ver:
+ msg = 'Version mismatch: waf %s <> wafadmin %s (wafdir %s)' % (ver, WAFVERSION, wafdir)
+ print('\033[91mError: %s\033[0m' % msg)
+ sys.exit(1)
+
+ #"""
+ try:
+ prepare_impl(t, cwd, ver, wafdir)
+ except Utils.WafError, e:
+ error(str(e))
+ sys.exit(1)
+ except KeyboardInterrupt:
+ Utils.pprint('RED', 'Interrupted')
+ sys.exit(68)
+ """
+ import cProfile, pstats
+ cProfile.runctx("import Scripting; Scripting.prepare_impl(t, cwd, ver, wafdir)", {},
+ {'t': t, 'cwd':cwd, 'ver':ver, 'wafdir':wafdir},
+ 'profi.txt')
+ p = pstats.Stats('profi.txt')
+ p.sort_stats('time').print_stats(45)
+ #"""
+
+def main():
+ global commands
+ commands = Options.arg_line[:]
+
+ while commands:
+ x = commands.pop(0)
+
+ ini = datetime.datetime.now()
+ if x == 'configure':
+ fun = configure
+ elif x == 'build':
+ fun = build
+ else:
+ fun = getattr(Utils.g_module, x, None)
+
+ if not fun:
+ raise Utils.WscriptError('No such command %r' % x)
+
+ ctx = getattr(Utils.g_module, x + '_context', Utils.Context)()
+
+ if x in ['init', 'shutdown', 'dist', 'distclean', 'distcheck']:
+ # compatibility TODO remove in waf 1.6
+ try:
+ fun(ctx)
+ except TypeError:
+ fun()
+ else:
+ fun(ctx)
+
+ ela = ''
+ if not Options.options.progress_bar:
+ ela = ' (%s)' % Utils.get_elapsed_time(ini)
+
+ if x != 'init' and x != 'shutdown':
+ info('%r finished successfully%s' % (x, ela))
+
+ if not commands and x != 'shutdown':
+ commands.append('shutdown')
+
+def configure(conf):
+
+ src = getattr(Options.options, SRCDIR, None)
+ if not src: src = getattr(Utils.g_module, SRCDIR, None)
+ if not src: src = getattr(Utils.g_module, 'top', None)
+ if not src:
+ src = '.'
+ incomplete_src = 1
+ src = os.path.abspath(src)
+
+ bld = getattr(Options.options, BLDDIR, None)
+ if not bld: bld = getattr(Utils.g_module, BLDDIR, None)
+ if not bld: bld = getattr(Utils.g_module, 'out', None)
+ if not bld:
+ bld = 'build'
+ incomplete_bld = 1
+ if bld == '.':
+ raise Utils.WafError('Setting blddir="." may cause distclean problems')
+ bld = os.path.abspath(bld)
+
+ try: os.makedirs(bld)
+ except OSError: pass
+
+ # It is not possible to compile specific targets during configuration,
+ # and leaving them set may cause configuration errors if autoconfig is set
+ targets = Options.options.compile_targets
+ Options.options.compile_targets = None
+ Options.is_install = False
+
+ conf.srcdir = src
+ conf.blddir = bld
+ conf.post_init()
+
+ if 'incomplete_src' in vars():
+ conf.check_message_1('Setting srcdir to')
+ conf.check_message_2(src)
+ if 'incomplete_bld' in vars():
+ conf.check_message_1('Setting blddir to')
+ conf.check_message_2(bld)
+
+ # call the main wscript's configure()
+ conf.sub_config([''])
+
+ conf.store()
+
+ # this will write a configure lock so that subsequent builds will
+ # consider the current path as the root directory (see prepare_impl).
+ # to remove: use 'waf distclean'
+ env = Environment.Environment()
+ env[BLDDIR] = bld
+ env[SRCDIR] = src
+ env['argv'] = sys.argv
+ env['commands'] = Options.commands
+ env['options'] = Options.options.__dict__
+
+ # conf.hash & conf.files hold wscript files paths and hash
+ # (used only by Configure.autoconfig)
+ env['hash'] = conf.hash
+ env['files'] = conf.files
+ env['environ'] = dict(conf.environ)
+ env['cwd'] = os.path.split(Utils.g_module.root_path)[0]
+
+ if Utils.g_module.root_path != src:
+ # in case the source dir is somewhere else
+ env.store(os.path.join(src, Options.lockfile))
+
+ env.store(Options.lockfile)
+
+ Options.options.compile_targets = targets
+
+def clean(bld):
+ '''removes the build files'''
+ try:
+ proj = Environment.Environment(Options.lockfile)
+ except IOError:
+ raise Utils.WafError('Nothing to clean (project not configured)')
+
+ bld.load_dirs(proj[SRCDIR], proj[BLDDIR])
+ bld.load_envs()
+
+ bld.is_install = 0 # False
+
+ # read the scripts - and set the path to the wscript path (useful for srcdir='/foo/bar')
+ bld.add_subdirs([os.path.split(Utils.g_module.root_path)[0]])
+
+ try:
+ bld.clean()
+ finally:
+ bld.save()
+
+def check_configured(bld):
+ if not Configure.autoconfig:
+ return bld
+
+ conf_cls = getattr(Utils.g_module, 'configure_context', Utils.Context)
+ bld_cls = getattr(Utils.g_module, 'build_context', Utils.Context)
+
+ def reconf(proj):
+ back = (Options.commands, Options.options.__dict__, Logs.zones, Logs.verbose)
+
+ Options.commands = proj['commands']
+ Options.options.__dict__ = proj['options']
+ conf = conf_cls()
+ conf.environ = proj['environ']
+ configure(conf)
+
+ (Options.commands, Options.options.__dict__, Logs.zones, Logs.verbose) = back
+
+ try:
+ proj = Environment.Environment(Options.lockfile)
+ except IOError:
+ conf = conf_cls()
+ configure(conf)
+ else:
+ try:
+ bld = bld_cls()
+ bld.load_dirs(proj[SRCDIR], proj[BLDDIR])
+ bld.load_envs()
+ except Utils.WafError:
+ reconf(proj)
+ return bld_cls()
+
+ try:
+ proj = Environment.Environment(Options.lockfile)
+ except IOError:
+ raise Utils.WafError('Auto-config: project does not configure (bug)')
+
+ h = 0
+ try:
+ for file in proj['files']:
+ if file.endswith('configure'):
+ h = hash((h, Utils.readf(file)))
+ else:
+ mod = Utils.load_module(file)
+ h = hash((h, mod.waf_hash_val))
+ except (OSError, IOError):
+ warn('Reconfiguring the project: a file is unavailable')
+ reconf(proj)
+ else:
+ if (h != proj['hash']):
+ warn('Reconfiguring the project: the configuration has changed')
+ reconf(proj)
+
+ return bld_cls()
+
+def install(bld):
+ '''installs the build files'''
+ bld = check_configured(bld)
+
+ Options.commands['install'] = True
+ Options.commands['uninstall'] = False
+ Options.is_install = True
+
+ bld.is_install = INSTALL
+
+ build_impl(bld)
+ bld.install()
+
+def uninstall(bld):
+ '''removes the installed files'''
+ Options.commands['install'] = False
+ Options.commands['uninstall'] = True
+ Options.is_install = True
+
+ bld.is_install = UNINSTALL
+
+ try:
+ def runnable_status(self):
+ return SKIP_ME
+ setattr(Task.Task, 'runnable_status_back', Task.Task.runnable_status)
+ setattr(Task.Task, 'runnable_status', runnable_status)
+
+ build_impl(bld)
+ bld.install()
+ finally:
+ setattr(Task.Task, 'runnable_status', Task.Task.runnable_status_back)
+
+def build(bld):
+ bld = check_configured(bld)
+
+ Options.commands['install'] = False
+ Options.commands['uninstall'] = False
+ Options.is_install = False
+
+ bld.is_install = 0 # False
+
+ return build_impl(bld)
+
+def build_impl(bld):
+ # compile the project and/or install the files
+ try:
+ proj = Environment.Environment(Options.lockfile)
+ except IOError:
+ raise Utils.WafError("Project not configured (run 'waf configure' first)")
+
+ bld.load_dirs(proj[SRCDIR], proj[BLDDIR])
+ bld.load_envs()
+
+ info("Waf: Entering directory `%s'" % bld.bldnode.abspath())
+ bld.add_subdirs([os.path.split(Utils.g_module.root_path)[0]])
+
+ # execute something immediately before the build starts
+ bld.pre_build()
+
+ try:
+ bld.compile()
+ finally:
+ if Options.options.progress_bar: print('')
+ info("Waf: Leaving directory `%s'" % bld.bldnode.abspath())
+
+ # execute something immediately after a successful build
+ bld.post_build()
+
+ bld.install()
+
+excludes = '.bzr .bzrignore .git .gitignore .svn CVS .cvsignore .arch-ids {arch} SCCS BitKeeper .hg _MTN _darcs Makefile Makefile.in config.log .gitattributes .hgignore .hgtags'.split()
+dist_exts = '~ .rej .orig .pyc .pyo .bak .tar.bz2 tar.gz .zip .swp'.split()
+def dont_dist(name, src, build_dir):
+ global excludes, dist_exts
+
+ if (name.startswith(',,')
+ or name.startswith('++')
+ or name.startswith('.waf')
+ or (src == '.' and name == Options.lockfile)
+ or name in excludes
+ or name == build_dir
+ ):
+ return True
+
+ for ext in dist_exts:
+ if name.endswith(ext):
+ return True
+
+ return False
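+
+# Illustrative behaviour (not from upstream) with the defaults above:
+#   dont_dist('foo.pyc', '.', 'build') -> True (matches the '.pyc' entry in dist_exts)
+#   dont_dist('wscript', '.', 'build') -> False (the file is shipped in the tarball)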
+
+# like shutil.copytree, but excludes some files (see dont_dist)
+# and raises exceptions immediately
+def copytree(src, dst, build_dir):
+ names = os.listdir(src)
+ os.makedirs(dst)
+ for name in names:
+ srcname = os.path.join(src, name)
+ dstname = os.path.join(dst, name)
+
+ if dont_dist(name, src, build_dir):
+ continue
+
+ if os.path.isdir(srcname):
+ copytree(srcname, dstname, build_dir)
+ else:
+ shutil.copy2(srcname, dstname)
+
+# TODO in waf 1.6, change this method if "srcdir == blddir" is allowed
+def distclean(ctx=None):
+ '''removes the build directory'''
+ global commands
+ lst = os.listdir('.')
+ for f in lst:
+ if f == Options.lockfile:
+ try:
+ proj = Environment.Environment(f)
+ except:
+ Logs.warn('could not read %r' % f)
+ continue
+
+ try:
+ shutil.rmtree(proj[BLDDIR])
+ except IOError:
+ pass
+ except OSError, e:
+ if e.errno != errno.ENOENT:
+ Logs.warn('project %r cannot be removed' % proj[BLDDIR])
+
+ try:
+ os.remove(f)
+ except OSError, e:
+ if e.errno != errno.ENOENT:
+ Logs.warn('file %r cannot be removed' % f)
+
+ # remove the local waf cache
+ if not commands and f.startswith('.waf'):
+ shutil.rmtree(f, ignore_errors=True)
+
+# FIXME waf 1.6: take a single ctx parameter, and remove the optional appname and version
+def dist(appname='', version=''):
+ '''makes a tarball for redistributing the sources'''
+ # returns the name of the archive created
+ import tarfile
+
+ if not appname: appname = Utils.g_module.APPNAME
+ if not version: version = Utils.g_module.VERSION
+
+ tmp_folder = appname + '-' + version
+ if g_gz in ['gz', 'bz2']:
+ arch_name = tmp_folder + '.tar.' + g_gz
+ else:
+ arch_name = tmp_folder + '.' + 'zip'
+
+ # remove the previous dir
+ try:
+ shutil.rmtree(tmp_folder)
+ except (OSError, IOError):
+ pass
+
+ # remove the previous archive
+ try:
+ os.remove(arch_name)
+ except (OSError, IOError):
+ pass
+
+ # copy the files into the temporary folder
+ blddir = getattr(Utils.g_module, BLDDIR, None)
+ if not blddir:
+ blddir = getattr(Utils.g_module, 'out', None)
+ copytree('.', tmp_folder, blddir)
+
+ # undocumented hook for additional cleanup
+ dist_hook = getattr(Utils.g_module, 'dist_hook', None)
+ if dist_hook:
+ back = os.getcwd()
+ os.chdir(tmp_folder)
+ try:
+ dist_hook()
+ finally:
+ # go back to the root directory
+ os.chdir(back)
+
+ if g_gz in ['gz', 'bz2']:
+ tar = tarfile.open(arch_name, 'w:' + g_gz)
+ tar.add(tmp_folder)
+ tar.close()
+ else:
+ Utils.zip_folder(tmp_folder, arch_name, tmp_folder)
+
+ try: from hashlib import sha1 as sha
+ except ImportError: from sha import sha
+ try:
+ digest = " (sha=%r)" % sha(Utils.readf(arch_name)).hexdigest()
+ except:
+ digest = ''
+
+ info('New archive created: %s%s' % (arch_name, digest))
+
+ if os.path.exists(tmp_folder): shutil.rmtree(tmp_folder)
+ return arch_name
+
+# FIXME waf 1.6: take a single ctx parameter, and remove the optional appname and version
+def distcheck(appname='', version='', subdir=''):
+ '''checks if the sources compile (tarball from 'dist')'''
+ import tempfile, tarfile
+
+ if not appname: appname = Utils.g_module.APPNAME
+ if not version: version = Utils.g_module.VERSION
+
+ waf = os.path.abspath(sys.argv[0])
+ tarball = dist(appname, version)
+
+ path = appname + '-' + version
+
+ # remove any previous instance
+ if os.path.exists(path):
+ shutil.rmtree(path)
+
+ t = tarfile.open(tarball)
+ for x in t: t.extract(x)
+ t.close()
+
+ # build_path is the directory for the waf invocation
+ if subdir:
+ build_path = os.path.join(path, subdir)
+ else:
+ build_path = path
+
+ instdir = tempfile.mkdtemp('.inst', '%s-%s' % (appname, version))
+ ret = Utils.pproc.Popen([waf, 'configure', 'build', 'install', 'uninstall', '--destdir=' + instdir], cwd=build_path).wait()
+ if ret:
+ raise Utils.WafError('distcheck failed with code %i' % ret)
+
+ if os.path.exists(instdir):
+ raise Utils.WafError('distcheck succeeded, but files were left in %s' % instdir)
+
+ shutil.rmtree(path)
+
+# FIXME remove in Waf 1.6 (kept for compatibility)
+def add_subdir(dir, bld):
+ bld.recurse(dir, 'build')
diff --git a/third_party/waf/wafadmin/Task.py b/third_party/waf/wafadmin/Task.py
new file mode 100644
index 0000000..59d1020
--- /dev/null
+++ b/third_party/waf/wafadmin/Task.py
@@ -0,0 +1,1199 @@
+#!/usr/bin/env python
+# encoding: utf-8
+# Thomas Nagy, 2005-2008 (ita)
+
+"""
+Running tasks in parallel is a simple problem, but in practice it is more complicated:
+* dependencies discovered during the build (dynamic task creation)
+* dependencies discovered after files are compiled
+* the amount of tasks and dependencies (graph size) can be huge
+
+This is why the dependency management is split into three different levels:
+1. groups of tasks that all run after another group of tasks
+2. groups of tasks that can be run in parallel
+3. tasks that can run in parallel, but with possible unknown ad-hoc dependencies
+
+The point #1 represents a strict sequential order between groups of tasks, for example a compiler is produced
+and used to compile the rest, whereas #2 and #3 represent partial order constraints where #2 applies to the kind of task
+and #3 applies to the task instances.
+
+#1 is held by the task manager: ordered list of TaskGroups (see bld.add_group)
+#2 is held by the task groups and the task types: precedence after/before (topological sort),
+ and the constraints extracted from file extensions
+#3 is held by the tasks individually (attribute run_after),
+ and the scheduler (Runner.py) uses Task::runnable_status to reorder the tasks
+
+--
+
+To try, use something like this in your code:
+import Constants, Task
+Task.algotype = Constants.MAXPARALLEL
+
+--
+
+There are two concepts with the tasks (individual units of change):
+* dependency (if 1 is recompiled, recompile 2)
+* order (run 2 after 1)
+
+example 1: if t1 depends on t2 and t2 depends on t3 it is not necessary to make t1 depend on t3 (dependency is transitive)
+example 2: if t1 depends on a node produced by t2, it is not immediately obvious that t1 must run after t2 (order is not obvious)
+
+The role of the Task Manager is to give out the tasks in order (groups of tasks that may be run in parallel, one group after the other)
+
+"""
+
+import os, shutil, sys, re, random, datetime, tempfile, shlex
+from Utils import md5
+import Build, Runner, Utils, Node, Logs, Options
+from Logs import debug, warn, error
+from Constants import *
+
+algotype = NORMAL
+#algotype = JOBCONTROL
+#algotype = MAXPARALLEL
+
+COMPILE_TEMPLATE_SHELL = '''
+def f(task):
+ env = task.env
+ wd = getattr(task, 'cwd', None)
+ p = env.get_flat
+ cmd = \'\'\' %s \'\'\' % s
+ return task.exec_command(cmd, cwd=wd)
+'''
+
+COMPILE_TEMPLATE_NOSHELL = '''
+def f(task):
+ env = task.env
+ wd = getattr(task, 'cwd', None)
+ def to_list(xx):
+ if isinstance(xx, str): return [xx]
+ return xx
+ lst = []
+ %s
+ lst = [x for x in lst if x]
+ return task.exec_command(lst, cwd=wd)
+'''
+
+
+"""
+Enable different kinds of dependency algorithms:
+1 make groups: first compile all cpps and then compile all links (NORMAL)
+2 parallelize all (each link task runs after its dependencies) (MAXPARALLEL)
+3 like 1 but provide additional constraints for the parallelization (MAXJOBS)
+
+In theory scheme 1 will be faster than scheme 2 for waf, but it might be slower for builds
+Scheme 2 will not allow running tasks one by one, so it can cause disk thrashing on huge builds
+"""
+
+file_deps = Utils.nada
+"""
+An additional dependency pre-check may be added by replacing the function file_deps,
+e.g. with extract_outputs or extract_deps below.
+"""
+
+class TaskManager(object):
+ """The manager is attached to the build object, it holds a list of TaskGroup"""
+ def __init__(self):
+ self.groups = []
+ self.tasks_done = []
+ self.current_group = 0
+ self.groups_names = {}
+
+ def group_name(self, g):
+ """name for the group g (utility)"""
+ if not isinstance(g, TaskGroup):
+ g = self.groups[g]
+ for x in self.groups_names:
+ if id(self.groups_names[x]) == id(g):
+ return x
+ return ''
+
+ def group_idx(self, tg):
+ """group the task generator tg is in"""
+ se = id(tg)
+ for i in range(len(self.groups)):
+ g = self.groups[i]
+ for t in g.tasks_gen:
+ if id(t) == se:
+ return i
+ return None
+
+ def get_next_set(self):
+ """return the next set of tasks to execute
+ the first parameter is the maximum amount of parallelization that may occur"""
+ ret = None
+ while not ret and self.current_group < len(self.groups):
+ ret = self.groups[self.current_group].get_next_set()
+ if ret: return ret
+ else:
+ self.groups[self.current_group].process_install()
+ self.current_group += 1
+ return (None, None)
+
+ def add_group(self, name=None, set=True):
+ #if self.groups and not self.groups[0].tasks:
+ # error('add_group: an empty group is already present')
+ g = TaskGroup()
+
+ if name and name in self.groups_names:
+ error('add_group: name %s already present' % name)
+ self.groups_names[name] = g
+ self.groups.append(g)
+ if set:
+ self.current_group = len(self.groups) - 1
+
+ def set_group(self, idx):
+ if isinstance(idx, str):
+ g = self.groups_names[idx]
+ for x in xrange(len(self.groups)):
+ if id(g) == id(self.groups[x]):
+ self.current_group = x
+ else:
+ self.current_group = idx
+
+ def add_task_gen(self, tgen):
+ if not self.groups: self.add_group()
+ self.groups[self.current_group].tasks_gen.append(tgen)
+
+ def add_task(self, task):
+ if not self.groups: self.add_group()
+ self.groups[self.current_group].tasks.append(task)
+
+ def total(self):
+ total = 0
+ if not self.groups: return 0
+ for group in self.groups:
+ total += len(group.tasks)
+ return total
+
+ def add_finished(self, tsk):
+ self.tasks_done.append(tsk)
+ bld = tsk.generator.bld
+ if bld.is_install:
+ f = None
+ if 'install' in tsk.__dict__:
+ f = tsk.__dict__['install']
+ # install=0 to prevent installation
+ if f: f(tsk)
+ else:
+ tsk.install()
+
+class TaskGroup(object):
+ "the compilation of one group does not begin until the previous group has finished (in the manager)"
+ def __init__(self):
+ self.tasks = [] # this list will be consumed
+ self.tasks_gen = []
+
+ self.cstr_groups = Utils.DefaultDict(list) # tasks having equivalent constraints
+ self.cstr_order = Utils.DefaultDict(set) # partial order between the cstr groups
+ self.temp_tasks = [] # tasks put on hold
+ self.ready = 0
+ self.post_funs = []
+
+ def reset(self):
+ "clears the state of the object (put back the tasks into self.tasks)"
+ for x in self.cstr_groups:
+ self.tasks += self.cstr_groups[x]
+ self.tasks = self.temp_tasks + self.tasks
+ self.temp_tasks = []
+ self.cstr_groups = Utils.DefaultDict(list)
+ self.cstr_order = Utils.DefaultDict(set)
+ self.ready = 0
+
+ def process_install(self):
+ for (f, k, kw) in self.post_funs:
+ f(*k, **kw)
+
+ def prepare(self):
+ "prepare the scheduling"
+ self.ready = 1
+ file_deps(self.tasks)
+ self.make_cstr_groups()
+ self.extract_constraints()
+
+ def get_next_set(self):
+ "next list of tasks to execute using max job settings, returns (maxjobs, task_list)"
+ global algotype
+ if algotype == NORMAL:
+ tasks = self.tasks_in_parallel()
+ maxj = MAXJOBS
+ elif algotype == JOBCONTROL:
+ (maxj, tasks) = self.tasks_by_max_jobs()
+ elif algotype == MAXPARALLEL:
+ tasks = self.tasks_with_inner_constraints()
+ maxj = MAXJOBS
+ else:
+ raise Utils.WafError("unknown algorithm type %s" % (algotype))
+
+ if not tasks: return ()
+ return (maxj, tasks)
+
+ def make_cstr_groups(self):
+ "unite the tasks that have similar constraints"
+ self.cstr_groups = Utils.DefaultDict(list)
+ for x in self.tasks:
+ h = x.hash_constraints()
+ self.cstr_groups[h].append(x)
+
+ def set_order(self, a, b):
+ self.cstr_order[a].add(b)
+
+ def compare_exts(self, t1, t2):
+ "extension production"
+ x = "ext_in"
+ y = "ext_out"
+ in_ = t1.attr(x, ())
+ out_ = t2.attr(y, ())
+ for k in in_:
+ if k in out_:
+ return -1
+ in_ = t2.attr(x, ())
+ out_ = t1.attr(y, ())
+ for k in in_:
+ if k in out_:
+ return 1
+ return 0
+
+ def compare_partial(self, t1, t2):
+ "partial relations after/before"
+ m = "after"
+ n = "before"
+ name = t2.__class__.__name__
+ if name in Utils.to_list(t1.attr(m, ())): return -1
+ elif name in Utils.to_list(t1.attr(n, ())): return 1
+ name = t1.__class__.__name__
+ if name in Utils.to_list(t2.attr(m, ())): return 1
+ elif name in Utils.to_list(t2.attr(n, ())): return -1
+ return 0
+
+ def extract_constraints(self):
+ "extract the parallelization constraints from the tasks with different constraints"
+ keys = self.cstr_groups.keys()
+ max = len(keys)
+ # hopefully the length of this list is short
+ for i in xrange(max):
+ t1 = self.cstr_groups[keys[i]][0]
+ for j in xrange(i + 1, max):
+ t2 = self.cstr_groups[keys[j]][0]
+
+ # add the constraints based on the comparisons
+ val = (self.compare_exts(t1, t2)
+ or self.compare_partial(t1, t2)
+ )
+ if val > 0:
+ self.set_order(keys[i], keys[j])
+ elif val < 0:
+ self.set_order(keys[j], keys[i])
+
+ def tasks_in_parallel(self):
+ "(NORMAL) next list of tasks that may be executed in parallel"
+
+ if not self.ready: self.prepare()
+
+ keys = self.cstr_groups.keys()
+
+ unconnected = []
+ remainder = []
+
+ for u in keys:
+ for k in self.cstr_order.values():
+ if u in k:
+ remainder.append(u)
+ break
+ else:
+ unconnected.append(u)
+
+ toreturn = []
+ for y in unconnected:
+ toreturn.extend(self.cstr_groups[y])
+
+ # remove the groups only after their tasks have been collected above
+ for y in unconnected:
+ try: self.cstr_order.__delitem__(y)
+ except KeyError: pass
+ self.cstr_groups.__delitem__(y)
+
+ if not toreturn and remainder:
+ raise Utils.WafError("circular order constraint detected %r" % remainder)
+
+ return toreturn
+
+ def tasks_by_max_jobs(self):
+ "(JOBCONTROL) returns the tasks that can run in parallel with the max amount of jobs"
+ if not self.ready: self.prepare()
+ if not self.temp_tasks: self.temp_tasks = self.tasks_in_parallel()
+ if not self.temp_tasks: return (None, None)
+
+ maxjobs = MAXJOBS
+ ret = []
+ remaining = []
+ for t in self.temp_tasks:
+ m = getattr(t, "maxjobs", getattr(self.__class__, "maxjobs", MAXJOBS))
+ if m > maxjobs:
+ remaining.append(t)
+ elif m < maxjobs:
+ remaining += ret
+ ret = [t]
+ maxjobs = m
+ else:
+ ret.append(t)
+ self.temp_tasks = remaining
+ return (maxjobs, ret)
+
+ def tasks_with_inner_constraints(self):
+ """(MAXPARALLEL) returns all tasks in this group, but add the constraints on each task instance
+ as an optimization, it might be desirable to discard the tasks which do not have to run"""
+ if not self.ready: self.prepare()
+
+ if getattr(self, "done", None): return None
+
+ for p in self.cstr_order:
+ for v in self.cstr_order[p]:
+ for m in self.cstr_groups[p]:
+ for n in self.cstr_groups[v]:
+ n.set_run_after(m)
+ self.cstr_order = Utils.DefaultDict(set)
+ self.cstr_groups = Utils.DefaultDict(list)
+ self.done = 1
+ return self.tasks[:] # make a copy
+
+class store_task_type(type):
+ "store the task types that have a name ending in _task into a map (remember the existing task types)"
+ def __init__(cls, name, bases, dict):
+ super(store_task_type, cls).__init__(name, bases, dict)
+ name = cls.__name__
+
+ if name.endswith('_task'):
+ name = name.replace('_task', '')
+ if name != 'TaskBase':
+ TaskBase.classes[name] = cls
+
+class TaskBase(object):
+ """Base class for all Waf tasks
+
+ The most important methods are (by usual order of call):
+ 1 runnable_status: ask the task if it should be run, skipped, or if we have to ask later
+ 2 __str__: string to display to the user
+ 3 run: execute the task
+ 4 post_run: after the task is run, update the cache about the task
+
+ This class should be seen as an interface, it provides the very minimum necessary for the scheduler
+ so it does not do much.
+
+ For illustration purposes, TaskBase instances try to execute self.fun (if provided)
+ """
+
+ __metaclass__ = store_task_type
+
+ color = "GREEN"
+ maxjobs = MAXJOBS
+ classes = {}
+ stat = None
+
+ def __init__(self, *k, **kw):
+ self.hasrun = NOT_RUN
+
+ try:
+ self.generator = kw['generator']
+ except KeyError:
+ self.generator = self
+ self.bld = Build.bld
+
+ if kw.get('normal', 1):
+ self.generator.bld.task_manager.add_task(self)
+
+ def __repr__(self):
+ "used for debugging"
+ return '\n\t{task: %s %s}' % (self.__class__.__name__, str(getattr(self, "fun", "")))
+
+ def __str__(self):
+ "string to display to the user"
+ if hasattr(self, 'fun'):
+ return 'executing: %s\n' % self.fun.__name__
+ return self.__class__.__name__ + '\n'
+
+ def exec_command(self, *k, **kw):
+ "use this for executing commands from tasks"
+ # TODO in waf 1.6, eliminate bld.exec_command, and move the cwd processing to here
+ if self.env['env']:
+ kw['env'] = self.env['env']
+ return self.generator.bld.exec_command(*k, **kw)
+
+ def runnable_status(self):
+ "RUN_ME SKIP_ME or ASK_LATER"
+ return RUN_ME
+
+ def can_retrieve_cache(self):
+ return False
+
+ def call_run(self):
+ if self.can_retrieve_cache():
+ return 0
+ return self.run()
+
+ def run(self):
+ "called if the task must run"
+ if hasattr(self, 'fun'):
+ return self.fun(self)
+ return 0
+
+ def post_run(self):
+ "update the dependency tree (node stats)"
+ pass
+
+ def display(self):
+ "print either the description (using __str__) or the progress bar or the ide output"
+ col1 = Logs.colors(self.color)
+ col2 = Logs.colors.NORMAL
+
+ if Options.options.progress_bar == 1:
+ return self.generator.bld.progress_line(self.position[0], self.position[1], col1, col2)
+
+ if Options.options.progress_bar == 2:
+ ela = Utils.get_elapsed_time(self.generator.bld.ini)
+ try:
+ ins = ','.join([n.name for n in self.inputs])
+ except AttributeError:
+ ins = ''
+ try:
+ outs = ','.join([n.name for n in self.outputs])
+ except AttributeError:
+ outs = ''
+ return '|Total %s|Current %s|Inputs %s|Outputs %s|Time %s|\n' % (self.position[1], self.position[0], ins, outs, ela)
+
+ total = self.position[1]
+ n = len(str(total))
+ fs = '[%%%dd/%%%dd] %%s%%s%%s' % (n, n)
+ return fs % (self.position[0], self.position[1], col1, str(self), col2)
+
+ def attr(self, att, default=None):
+ "retrieve an attribute from the instance or from the class (microoptimization here)"
+ ret = getattr(self, att, self)
+ if ret is self: return getattr(self.__class__, att, default)
+ return ret
+
+ def hash_constraints(self):
+ "identify a task type for all the constraints relevant for the scheduler: precedence, file production"
+ a = self.attr
+ sum = hash((self.__class__.__name__,
+ str(a('before', '')),
+ str(a('after', '')),
+ str(a('ext_in', '')),
+ str(a('ext_out', '')),
+ self.__class__.maxjobs))
+ return sum
+
+ def format_error(self):
+ "error message to display to the user (when a build fails)"
+ if getattr(self, "err_msg", None):
+ return self.err_msg
+ elif self.hasrun == CRASHED:
+ try:
+ return " -> task failed (err #%d): %r" % (self.err_code, self)
+ except AttributeError:
+ return " -> task failed: %r" % self
+ elif self.hasrun == MISSING:
+ return " -> missing files: %r" % self
+ else:
+ return ''
+
+ def install(self):
+ """
+ installation is performed by looking at the task attributes:
+ * install_path: installation path like "${PREFIX}/bin"
+ * filename: install the first node in the outputs as a file with a particular name, be certain to give os.sep
+ * chmod: permissions
+ """
+ bld = self.generator.bld
+ d = self.attr('install')
+
+ if self.attr('install_path'):
+ lst = [a.relpath_gen(bld.srcnode) for a in self.outputs]
+ perm = self.attr('chmod', O644)
+ if self.attr('src'):
+ # if src is given, install the sources too
+ lst += [a.relpath_gen(bld.srcnode) for a in self.inputs]
+ if self.attr('filename'):
+ dir = self.install_path.rstrip(os.sep) + os.sep + self.attr('filename')
+ bld.install_as(dir, lst[0], self.env, perm)
+ else:
+ bld.install_files(self.install_path, lst, self.env, perm)
+
+class Task(TaskBase):
+ """The parent class is quite limited, in this version:
+ * file system interaction: input and output nodes
+ * persistence: do not re-execute tasks that have already run
+ * caching: same files can be saved and retrieved from a cache directory
+ * dependencies:
+ implicit, like .c files depending on .h files
+ explicit, like the input nodes or the dep_nodes
+ environment variables, like the CXXFLAGS in self.env
+ """
+ vars = []
+ def __init__(self, env, **kw):
+ TaskBase.__init__(self, **kw)
+ self.env = env
+
+ # inputs and outputs are nodes
+ # use setters when possible
+ self.inputs = []
+ self.outputs = []
+
+ self.dep_nodes = []
+ self.run_after = []
+
+ # Additionally, you may define the following
+ #self.dep_vars = 'PREFIX DATADIR'
+
+ def __str__(self):
+ "string to display to the user"
+ env = self.env
+ src_str = ' '.join([a.nice_path(env) for a in self.inputs])
+ tgt_str = ' '.join([a.nice_path(env) for a in self.outputs])
+ if self.outputs: sep = ' -> '
+ else: sep = ''
+ return '%s: %s%s%s\n' % (self.__class__.__name__.replace('_task', ''), src_str, sep, tgt_str)
+
+ def __repr__(self):
+ return "".join(['\n\t{task: ', self.__class__.__name__, " ", ",".join([x.name for x in self.inputs]), " -> ", ",".join([x.name for x in self.outputs]), '}'])
+
+ def unique_id(self):
+ "get a unique id: hash the node paths, the variant, the class, the function"
+ try:
+ return self.uid
+ except AttributeError:
+ "this is not a real hot zone, but we want to avoid surprizes here"
+ m = md5()
+ up = m.update
+ up(self.__class__.__name__)
+ up(self.env.variant())
+ p = None
+ for x in self.inputs + self.outputs:
+ if p != x.parent.id:
+ p = x.parent.id
+ up(x.parent.abspath())
+ up(x.name)
+ self.uid = m.digest()
+ return self.uid
+
+ def set_inputs(self, inp):
+ if isinstance(inp, list): self.inputs += inp
+ else: self.inputs.append(inp)
+
+ def set_outputs(self, out):
+ if isinstance(out, list): self.outputs += out
+ else: self.outputs.append(out)
+
+ def set_run_after(self, task):
+ "set (scheduler) order on another task"
+ # TODO: handle list or object
+ assert isinstance(task, TaskBase)
+ self.run_after.append(task)
+
+ def add_file_dependency(self, filename):
+ "TODO user-provided file dependencies"
+ node = self.generator.bld.path.find_resource(filename)
+ self.dep_nodes.append(node)
+
+ def signature(self):
+ # compute the result once, and assume the scan signature will give the correct result
+ try: return self.cache_sig[0]
+ except AttributeError: pass
+
+ self.m = md5()
+
+ # explicit deps
+ exp_sig = self.sig_explicit_deps()
+
+ # env vars
+ var_sig = self.sig_vars()
+
+ # implicit deps
+
+ imp_sig = SIG_NIL
+ if self.scan:
+ try:
+ imp_sig = self.sig_implicit_deps()
+ except ValueError:
+ return self.signature()
+
+ # we now have the signature (first element) and the details (for debugging)
+ ret = self.m.digest()
+ self.cache_sig = (ret, exp_sig, imp_sig, var_sig)
+ return ret
+
+ def runnable_status(self):
+ "SKIP_ME RUN_ME or ASK_LATER"
+ #return 0 # benchmarking
+
+ if self.inputs and (not self.outputs):
+ if not getattr(self.__class__, 'quiet', None):
+ warn("invalid task (no inputs OR outputs): override in a Task subclass or set the attribute 'quiet' %r" % self)
+
+ for t in self.run_after:
+ if not t.hasrun:
+ return ASK_LATER
+
+ env = self.env
+ bld = self.generator.bld
+
+ # first compute the signature
+ new_sig = self.signature()
+
+ # compare the signature to a signature computed previously
+ key = self.unique_id()
+ try:
+ prev_sig = bld.task_sigs[key][0]
+ except KeyError:
+ debug("task: task %r must run as it was never run before or the task code changed", self)
+ return RUN_ME
+
+ # compare the signatures of the outputs
+ for node in self.outputs:
+ variant = node.variant(env)
+ try:
+ if bld.node_sigs[variant][node.id] != new_sig:
+ return RUN_ME
+ except KeyError:
+ debug("task: task %r must run as the output nodes do not exist", self)
+ return RUN_ME
+
+ # debug if asked to
+ if Logs.verbose: self.debug_why(bld.task_sigs[key])
+
+ if new_sig != prev_sig:
+ return RUN_ME
+ return SKIP_ME
+
+ def post_run(self):
+ "called after a successful task run"
+ bld = self.generator.bld
+ env = self.env
+ sig = self.signature()
+ ssig = sig.encode('hex')
+
+ variant = env.variant()
+ for node in self.outputs:
+ # check if the node exists ..
+ try:
+ os.stat(node.abspath(env))
+ except OSError:
+ self.hasrun = MISSING
+ self.err_msg = '-> missing file: %r' % node.abspath(env)
+ raise Utils.WafError
+
+ # important, store the signature for the next run
+ bld.node_sigs[variant][node.id] = sig
+ bld.task_sigs[self.unique_id()] = self.cache_sig
+
+ # file caching, if possible
+ # try to avoid data corruption as much as possible
+ if not Options.cache_global or Options.options.nocache or not self.outputs:
+ return None
+
+ if getattr(self, 'cached', None):
+ return None
+
+ dname = os.path.join(Options.cache_global, ssig)
+ tmpdir = tempfile.mkdtemp(prefix=Options.cache_global + os.sep + 'waf')
+
+ try:
+ shutil.rmtree(dname)
+ except:
+ pass
+
+ try:
+ i = 0
+ for node in self.outputs:
+ variant = node.variant(env)
+ dest = os.path.join(tmpdir, str(i) + node.name)
+ shutil.copy2(node.abspath(env), dest)
+ i += 1
+ except (OSError, IOError):
+ try:
+ shutil.rmtree(tmpdir)
+ except:
+ pass
+ else:
+ try:
+ os.rename(tmpdir, dname)
+ except OSError:
+ try:
+ shutil.rmtree(tmpdir)
+ except:
+ pass
+ else:
+ try:
+ os.chmod(dname, O755)
+ except:
+ pass
+
+ def can_retrieve_cache(self):
+ """
+ Retrieve build nodes from the cache
+ update the file timestamps to help clean the least used entries from the cache
+ additionally, set an attribute 'cached' to avoid re-creating the same cache files
+
+ suppose there are files in cache/dir1/file1 and cache/dir2/file2
+ first, read the timestamp of dir1
+ then try to copy the files
+ then look at the timestamp again; if it has changed, the data may be corrupt (cache updated by another process)
+ should an exception occur, ignore the data
+ """
+ if not Options.cache_global or Options.options.nocache or not self.outputs:
+ return None
+
+ env = self.env
+ sig = self.signature()
+ ssig = sig.encode('hex')
+
+ # first try to access the cache folder for the task
+ dname = os.path.join(Options.cache_global, ssig)
+ try:
+ t1 = os.stat(dname).st_mtime
+ except OSError:
+ return None
+
+ i = 0
+ for node in self.outputs:
+ variant = node.variant(env)
+
+ orig = os.path.join(dname, str(i) + node.name)
+ try:
+ shutil.copy2(orig, node.abspath(env))
+ # mark the cache file as used recently (modified)
+ os.utime(orig, None)
+ except (OSError, IOError):
+ debug('task: failed retrieving file')
+ return None
+ i += 1
+
+ # is it the same folder?
+ try:
+ t2 = os.stat(dname).st_mtime
+ except OSError:
+ return None
+
+ if t1 != t2:
+ return None
+
+ for node in self.outputs:
+ self.generator.bld.node_sigs[variant][node.id] = sig
+ if Options.options.progress_bar < 1:
+ self.generator.bld.printout('restoring from cache %r\n' % node.bldpath(env))
+
+ self.cached = True
+ return 1
+
+ def debug_why(self, old_sigs):
+ "explains why a task is run"
+
+ new_sigs = self.cache_sig
+ def v(x):
+ return x.encode('hex')
+
+ debug("Task %r", self)
+ msgs = ['Task must run', '* Source file or manual dependency', '* Implicit dependency', '* Environment variable']
+ tmp = 'task: -> %s: %s %s'
+ for x in xrange(len(msgs)):
+ if (new_sigs[x] != old_sigs[x]):
+ debug(tmp, msgs[x], v(old_sigs[x]), v(new_sigs[x]))
+
+ def sig_explicit_deps(self):
+ bld = self.generator.bld
+ up = self.m.update
+
+ # the inputs
+ for x in self.inputs + getattr(self, 'dep_nodes', []):
+ if not x.parent.id in bld.cache_scanned_folders:
+ bld.rescan(x.parent)
+
+ variant = x.variant(self.env)
+ try:
+ up(bld.node_sigs[variant][x.id])
+ except KeyError:
+ raise Utils.WafError('Missing node signature for %r (required by %r)' % (x, self))
+
+ # manual dependencies, they can slow down the builds
+ if bld.deps_man:
+ additional_deps = bld.deps_man
+ for x in self.inputs + self.outputs:
+ try:
+ d = additional_deps[x.id]
+ except KeyError:
+ continue
+
+ for v in d:
+ if isinstance(v, Node.Node):
+ bld.rescan(v.parent)
+ variant = v.variant(self.env)
+ try:
+ v = bld.node_sigs[variant][v.id]
+ except KeyError:
+ raise Utils.WafError('Missing node signature for %r (required by %r)' % (v, self))
+ elif hasattr(v, '__call__'):
+ v = v() # dependency is a function, call it
+ up(v)
+
+ for x in self.dep_nodes:
+ v = bld.node_sigs[x.variant(self.env)][x.id]
+ up(v)
+
+ return self.m.digest()
+
+ def sig_vars(self):
+ bld = self.generator.bld
+ env = self.env
+
+ # dependencies on the environment vars
+ act_sig = bld.hash_env_vars(env, self.__class__.vars)
+ self.m.update(act_sig)
+
+ # additional variable dependencies, if provided
+ dep_vars = getattr(self, 'dep_vars', None)
+ if dep_vars:
+ self.m.update(bld.hash_env_vars(env, dep_vars))
+
+ return self.m.digest()
+
+ #def scan(self, node):
+ # """this method returns a tuple containing:
+ # * a list of nodes corresponding to real files
+ # * a list of names for files not found in path_lst
+ # the input parameters may have more parameters that the ones used below
+ # """
+ # return ((), ())
+ scan = None
+
+ # compute the signature, recompute it if there is no match in the cache
+ def sig_implicit_deps(self):
+ "the signature obtained may not be the one if the files have changed, we do it in two steps"
+
+ bld = self.generator.bld
+
+ # get the task signatures from previous runs
+ key = self.unique_id()
+ prev_sigs = bld.task_sigs.get(key, ())
+ if prev_sigs:
+ try:
+ # for issue #379
+ if prev_sigs[2] == self.compute_sig_implicit_deps():
+ return prev_sigs[2]
+ except (KeyError, OSError):
+ pass
+ del bld.task_sigs[key]
+ raise ValueError('rescan')
+
+ # no previous run or the signature of the dependencies has changed, rescan the dependencies
+ (nodes, names) = self.scan()
+ if Logs.verbose:
+ debug('deps: scanner for %s returned %s %s', str(self), str(nodes), str(names))
+
+ # store the dependencies in the cache
+ bld.node_deps[key] = nodes
+ bld.raw_deps[key] = names
+
+ # recompute the signature and return it
+ try:
+ sig = self.compute_sig_implicit_deps()
+ except KeyError:
+ try:
+ nodes = []
+ for k in bld.node_deps.get(self.unique_id(), []):
+ if k.id & 3 == 2: # Node.FILE:
+ if not k.id in bld.node_sigs[0]:
+ nodes.append(k)
+ else:
+ if not k.id in bld.node_sigs[self.env.variant()]:
+ nodes.append(k)
+ except:
+ nodes = '?'
+ raise Utils.WafError('Missing node signature for %r (for implicit dependencies %r)' % (nodes, self))
+
+ return sig
+
+ def compute_sig_implicit_deps(self):
+ """it is intended for .cpp and inferred .h files
+ there is a single list (no tree traversal)
+ this is the hot spot so ... do not touch"""
+ upd = self.m.update
+
+ bld = self.generator.bld
+ tstamp = bld.node_sigs
+ env = self.env
+
+ for k in bld.node_deps.get(self.unique_id(), []):
+ # unlikely but necessary if it happens
+ if not k.parent.id in bld.cache_scanned_folders:
+ # if the parent folder is removed, an OSError may be thrown
+ bld.rescan(k.parent)
+
+ # if the parent folder is removed, a KeyError will be thrown
+ if k.id & 3 == 2: # Node.FILE:
+ upd(tstamp[0][k.id])
+ else:
+ upd(tstamp[env.variant()][k.id])
+
+ return self.m.digest()
+
+def funex(c):
+ dc = {}
+ exec(c, dc)
+ return dc['f']
+
+reg_act = re.compile(r"(?P<backslash>\\)|(?P<dollar>\$\$)|(?P<subst>\$\{(?P<var>\w+)(?P<code>.*?)\})", re.M)
+def compile_fun_shell(name, line):
+ """Compiles a string (once) into a function, eg:
+ simple_task_type('c++', '${CXX} -o ${TGT[0]} ${SRC} -I ${SRC[0].parent.bldpath()}')
+
+ The env variables (CXX, ..) on the task must not hold dicts (their order is undefined)
+ The reserved keywords SRC and TGT represent the task input and output nodes
+
+ quick test:
+ bld(source='wscript', rule='echo "foo\\${SRC[0].name}\\bar"')
+ """
+
+ extr = []
+ def repl(match):
+ g = match.group
+ if g('dollar'): return "$"
+ elif g('backslash'): return '\\\\'
+ elif g('subst'): extr.append((g('var'), g('code'))); return "%s"
+ return None
+
+ line = reg_act.sub(repl, line) or line
+
+ parm = []
+ dvars = []
+ app = parm.append
+ for (var, meth) in extr:
+ if var == 'SRC':
+ if meth: app('task.inputs%s' % meth)
+ else: app('" ".join([a.srcpath(env) for a in task.inputs])')
+ elif var == 'TGT':
+ if meth: app('task.outputs%s' % meth)
+ else: app('" ".join([a.bldpath(env) for a in task.outputs])')
+ else:
+ if not var in dvars: dvars.append(var)
+ app("p('%s')" % var)
+ if parm: parm = "%% (%s) " % (',\n\t\t'.join(parm))
+ else: parm = ''
+
+ c = COMPILE_TEMPLATE_SHELL % (line, parm)
+
+ debug('action: %s', c)
+ return (funex(c), dvars)
+
+def compile_fun_noshell(name, line):
+
+ extr = []
+ def repl(match):
+ g = match.group
+ if g('dollar'): return "$"
+ elif g('subst'): extr.append((g('var'), g('code'))); return "<<|@|>>"
+ return None
+
+ line2 = reg_act.sub(repl, line)
+ params = line2.split('<<|@|>>')
+
+ buf = []
+ dvars = []
+ app = buf.append
+ for x in xrange(len(extr)):
+ params[x] = params[x].strip()
+ if params[x]:
+ app("lst.extend(%r)" % params[x].split())
+ (var, meth) = extr[x]
+ if var == 'SRC':
+ if meth: app('lst.append(task.inputs%s)' % meth)
+ else: app("lst.extend([a.srcpath(env) for a in task.inputs])")
+ elif var == 'TGT':
+ if meth: app('lst.append(task.outputs%s)' % meth)
+ else: app("lst.extend([a.bldpath(env) for a in task.outputs])")
+ else:
+ app('lst.extend(to_list(env[%r]))' % var)
+ if not var in dvars: dvars.append(var)
+
+ if params[-1]:
+ app("lst.extend(%r)" % shlex.split(params[-1]))
+
+ fun = COMPILE_TEMPLATE_NOSHELL % "\n\t".join(buf)
+ debug('action: %s', fun)
+ return (funex(fun), dvars)
+
+def compile_fun(name, line, shell=None):
+ "commands can be launched by the shell or not"
+ if line.find('<') > 0 or line.find('>') > 0 or line.find('&&') > 0:
+ shell = True
+ #else:
+ # shell = False
+
+ if shell is None:
+ if sys.platform == 'win32':
+ shell = False
+ else:
+ shell = True
+
+ if shell:
+ return compile_fun_shell(name, line)
+ else:
+ return compile_fun_noshell(name, line)
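+
+# Illustration (not from upstream): shell redirections force shell mode, e.g.
+#   compile_fun('cat', 'cat ${SRC} > ${TGT}')  # contains '>', compiled by compile_fun_shell
+#   compile_fun('cp', 'cp ${SRC} ${TGT}')      # no redirection, the platform default applies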
+
+def simple_task_type(name, line, color='GREEN', vars=[], ext_in=[], ext_out=[], before=[], after=[], shell=None):
+ """return a new Task subclass with the function run compiled from the line given"""
+ (fun, dvars) = compile_fun(name, line, shell)
+ fun.code = line
+ return task_type_from_func(name, fun, vars or dvars, color, ext_in, ext_out, before, after)
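+
+# Typical use (illustrative, the 'copy' rule is not from upstream):
+#   copy_cls = simple_task_type('copy', 'cp ${SRC} ${TGT}', color='BLUE')
+# The returned class is registered in TaskBase.classes['copy'], so instances
+# can later be created through task generators (see create_task in TaskGen.py).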
+
+def task_type_from_func(name, func, vars=[], color='GREEN', ext_in=[], ext_out=[], before=[], after=[]):
+ """return a new Task subclass with the function run compiled from the line given"""
+ params = {
+ 'run': func,
+ 'vars': vars,
+ 'color': color,
+ 'name': name,
+ 'ext_in': Utils.to_list(ext_in),
+ 'ext_out': Utils.to_list(ext_out),
+ 'before': Utils.to_list(before),
+ 'after': Utils.to_list(after),
+ }
+
+ cls = type(Task)(name, (Task,), params)
+ TaskBase.classes[name] = cls
+ return cls
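+
+# Function-based variant (illustrative, 'touch' is not an upstream task type):
+#   def touch_func(task):
+#       # create an empty file for the first output node
+#       open(task.outputs[0].abspath(task.env), 'w').close()
+#       return 0  # a return value of 0 means success
+#   task_type_from_func('touch', touch_func, color='CYAN')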
+
+def always_run(cls):
+ """Set all task instances of this class to be executed whenever a build is started
+ The task signature is calculated, but the result of the comparison between
+ task signatures is bypassed
+ """
+ old = cls.runnable_status
+ def always(self):
+ ret = old(self)
+ if ret == SKIP_ME:
+ return RUN_ME
+ return ret
+ cls.runnable_status = always
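+
+# e.g. (illustrative) always_run(TaskBase.classes['copy']) would force every
+# 'copy' task to execute even when its signature has not changed.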
+
+def update_outputs(cls):
+ """When a command is always run, it is possible that the output only change
+ sometimes. By default the build node have as a hash the signature of the task
+ which may not change. With this, the output nodes (produced) are hashed,
+ and the hashes are set to the build nodes
+
+ This may avoid unnecessary recompilations, but it uses more resources
+ (hashing the output files) so it is not used by default
+ """
+ old_post_run = cls.post_run
+ def post_run(self):
+ old_post_run(self)
+ bld = self.generator.bld
+ for output in self.outputs:
+ bld.node_sigs[self.env.variant()][output.id] = Utils.h_file(output.abspath(self.env))
+ bld.task_sigs[output.id] = self.unique_id()
+ cls.post_run = post_run
+
+ old_runnable_status = cls.runnable_status
+ def runnable_status(self):
+ status = old_runnable_status(self)
+ if status != RUN_ME:
+ return status
+
+ uid = self.unique_id()
+ try:
+ bld = self.outputs[0].__class__.bld
+ new_sig = self.signature()
+ prev_sig = bld.task_sigs[uid][0]
+ if prev_sig == new_sig:
+ for x in self.outputs:
+ if not x.id in bld.node_sigs[self.env.variant()]:
+ return RUN_ME
+ if bld.task_sigs[x.id] != uid: # ensure the outputs are associated with *this* task
+ return RUN_ME
+ return SKIP_ME
+ except KeyError:
+ pass
+ except IndexError:
+ pass
+ return RUN_ME
+ cls.runnable_status = runnable_status
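+
+# e.g. (illustrative) update_outputs(TaskBase.classes['copy']) hashes the files
+# produced by 'copy' tasks, so dependent tasks can be skipped when the output
+# contents did not actually change.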
+
+def extract_outputs(tasks):
+ """file_deps: Infer additional dependencies from task input and output nodes
+ """
+ v = {}
+ for x in tasks:
+ try:
+ (ins, outs) = v[x.env.variant()]
+ except KeyError:
+ ins = {}
+ outs = {}
+ v[x.env.variant()] = (ins, outs)
+
+ for a in getattr(x, 'inputs', []):
+ try: ins[a.id].append(x)
+ except KeyError: ins[a.id] = [x]
+ for a in getattr(x, 'outputs', []):
+ try: outs[a.id].append(x)
+ except KeyError: outs[a.id] = [x]
+
+ for (ins, outs) in v.values():
+ links = set(ins.iterkeys()).intersection(outs.iterkeys())
+ for k in links:
+ for a in ins[k]:
+ for b in outs[k]:
+ a.set_run_after(b)
+
+def extract_deps(tasks):
+ """file_deps: Infer additional dependencies from task input and output nodes and from implicit dependencies
+ returned by the scanners - this will only work if all tasks have been created
+
+ this is aimed at people who have pathological builds and who do not care enough
+ to implement the build dependencies properly
+
+ with two loops over the list of tasks, do not expect this to be really fast
+ """
+
+ # first reuse the function above
+ extract_outputs(tasks)
+
+ # map the output nodes to the tasks producing them
+ out_to_task = {}
+ for x in tasks:
+ v = x.env.variant()
+ try:
+ lst = x.outputs
+ except AttributeError:
+ pass
+ else:
+ for node in lst:
+ out_to_task[(v, node.id)] = x
+
+ # map the dependencies found to the tasks compiled
+ dep_to_task = {}
+ for x in tasks:
+ try:
+ x.signature()
+ except: # this is on purpose
+ pass
+
+ v = x.env.variant()
+ key = x.unique_id()
+ for k in x.generator.bld.node_deps.get(x.unique_id(), []):
+ try: dep_to_task[(v, k.id)].append(x)
+ except KeyError: dep_to_task[(v, k.id)] = [x]
+
+ # now get the intersection
+ deps = set(dep_to_task.keys()).intersection(set(out_to_task.keys()))
+
+ # and add the dependencies from task to task
+ for idx in deps:
+ for k in dep_to_task[idx]:
+ k.set_run_after(out_to_task[idx])
+
+ # cleanup, remove the signatures
+ for x in tasks:
+ try:
+ delattr(x, 'cache_sig')
+ except AttributeError:
+ pass
diff --git a/third_party/waf/wafadmin/TaskGen.py b/third_party/waf/wafadmin/TaskGen.py
new file mode 100644
index 0000000..5900809
--- /dev/null
+++ b/third_party/waf/wafadmin/TaskGen.py
@@ -0,0 +1,611 @@
+#!/usr/bin/env python
+# encoding: utf-8
+# Thomas Nagy, 2005-2008 (ita)
+
+"""
+The class task_gen encapsulates the creation of task objects (low-level code)
+The instances can have various parameters, but the creation of task nodes (Task.py)
+is delayed. To achieve this, various methods are called from the method "apply"
+
+The class task_gen contains lots of methods, and a configuration table:
+* the methods to call (self.meths) can be specified dynamically (removing, adding, ..)
+* the order of the methods (self.prec or by default task_gen.prec) is configurable
+* new methods can be inserted dynamically without pasting old code
+
+Additionally, task_gen provides the method apply_core
+* file extensions are mapped to methods: def meth(self, name_or_node)
+* if a mapping is not found in self.mappings, it is searched in task_gen.mappings
+* when called, the functions may modify self.allnodes to re-add source to process
+* the mappings can map an extension or a filename (see the code below)
+
+WARNING: subclasses must reimplement the clone method
+"""
+
+import os, traceback, copy
+import Build, Task, Utils, Logs, Options
+from Logs import debug, error, warn
+from Constants import *
+
+typos = {
+'sources':'source',
+'targets':'target',
+'include':'includes',
+'define':'defines',
+'importpath':'importpaths',
+'install_var':'install_path',
+'install_subdir':'install_path',
+'inst_var':'install_path',
+'inst_dir':'install_path',
+'feature':'features',
+}
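+
+# Illustration (not from upstream): __setattr__ below consults this table, so
+#   task_gen(bld=bld, sources='main.c')
+# warns "typo sources -> source" and stores the value in self.source instead.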
+
+class register_obj(type):
+ """no decorators for classes, so we use a metaclass
+ we store into task_gen.classes the classes that inherit task_gen
+ and whose names end in '_taskgen'
+ """
+ def __init__(cls, name, bases, dict):
+ super(register_obj, cls).__init__(name, bases, dict)
+ name = cls.__name__
+ suffix = '_taskgen'
+ if name.endswith(suffix):
+ task_gen.classes[name.replace(suffix, '')] = cls
+
+class task_gen(object):
+ """
+ Most methods are of the form 'def meth(self):' without any parameters;
+ there are many of them, and they do many different things:
+ * task creation
+ * task results installation
+ * environment modification
+ * attribute addition/removal
+
+ The inheritance approach is complicated
+ * mixing several languages at once
+ * subclassing is needed even for small changes
+ * inserting new methods is complicated
+
+ This new class uses a configuration table:
+ * adding new methods easily
+ * obtaining the order in which to call the methods
+ * postponing the method calls (post() -> apply)
+
+ Additionally, a 'traits' static attribute is provided:
+ * this list contains methods
+ * the methods can remove or add methods from self.meths
+ Example1: the attribute 'staticlib' is set on an instance
+ a method set in the list of traits is executed when the
+ instance is posted, it finds that flag and adds another method for execution
+ Example2: a method set in the list of traits finds the msvc
+ compiler (from self.env['MSVC']==1); more methods are added to self.meths
+ """
+
+ __metaclass__ = register_obj
+ mappings = {}
+ mapped = {}
+ prec = Utils.DefaultDict(list)
+ traits = Utils.DefaultDict(set)
+ classes = {}
+
+ def __init__(self, *kw, **kwargs):
+ self.prec = Utils.DefaultDict(list)
+ "map precedence of function names to call"
+ # so we will have to play with directed acyclic graphs
+ # detect cycles, etc
+
+ self.source = ''
+ self.target = ''
+
+ # list of methods to execute - do not touch it by hand unless you know what you are doing
+ self.meths = []
+
+ # list of mappings extension -> function
+ self.mappings = {}
+
+ # list of features (see the documentation on traits)
+ self.features = list(kw)
+
+ # not always a good idea
+ self.tasks = []
+
+ self.default_chmod = O644
+ self.default_install_path = None
+
+ # kind of private, beware of what you put in it, also, the contents are consumed
+ self.allnodes = []
+
+ self.bld = kwargs.get('bld', Build.bld)
+ self.env = self.bld.env.copy()
+
+ self.path = self.bld.path # emulate chdir when reading scripts
+ self.name = '' # give a name to the target (static+shlib with the same targetname ambiguity)
+
+ # provide a unique id
+ self.idx = self.bld.idx[self.path.id] = self.bld.idx.get(self.path.id, 0) + 1
+
+ for key, val in kwargs.iteritems():
+ setattr(self, key, val)
+
+ self.bld.task_manager.add_task_gen(self)
+ self.bld.all_task_gen.append(self)
+
+ def __str__(self):
+ return ("<task_gen '%s' of type %s defined in %s>"
+ % (self.name or self.target, self.__class__.__name__, str(self.path)))
+
+ def __setattr__(self, name, attr):
+ real = typos.get(name, name)
+ if real != name:
+ warn('typo %s -> %s' % (name, real))
+ if Logs.verbose > 0:
+ traceback.print_stack()
+ object.__setattr__(self, real, attr)
+
+ def to_list(self, value):
+ "helper: returns a list"
+ if isinstance(value, str): return value.split()
+ else: return value
+
+ def apply(self):
+ "order the methods to execute using self.prec or task_gen.prec"
+ keys = set(self.meths)
+
+ # add the methods listed in the features
+ self.features = Utils.to_list(self.features)
+ for x in self.features + ['*']:
+ st = task_gen.traits[x]
+ if not st:
+ warn('feature %r does not exist - bind at least one method to it' % x)
+ keys.update(st)
+
+ # copy the precedence table
+ prec = {}
+ prec_tbl = self.prec or task_gen.prec
+ for x in prec_tbl:
+ if x in keys:
+ prec[x] = prec_tbl[x]
+
+ # disconnected elements (methods with no precedence constraints)
+ tmp = []
+ for a in keys:
+ for x in prec.values():
+ if a in x: break
+ else:
+ tmp.append(a)
+
+ # topological sort
+ out = []
+ while tmp:
+ e = tmp.pop()
+ if e in keys: out.append(e)
+ try:
+ nlst = prec[e]
+ except KeyError:
+ pass
+ else:
+ del prec[e]
+ for x in nlst:
+ for y in prec:
+ if x in prec[y]:
+ break
+ else:
+ tmp.append(x)
+
+ if prec: raise Utils.WafError("graph has a cycle %s" % str(prec))
+ out.reverse()
+ self.meths = out
+
+ # then we run the methods in order
+ debug('task_gen: posting %s %d', self, id(self))
+ for x in out:
+ try:
+ v = getattr(self, x)
+ except AttributeError:
+ raise Utils.WafError("tried to retrieve %s which is not a valid method" % x)
+ debug('task_gen: -> %s (%d)', x, id(self))
+ v()
+
+ def post(self):
+ "runs the code to create the tasks, do not subclass"
+ if not self.name:
+ if isinstance(self.target, list):
+ self.name = ' '.join(self.target)
+ else:
+ self.name = self.target
+
+ if getattr(self, 'posted', None):
+ #error("OBJECT ALREADY POSTED" + str( self))
+ return
+
+ self.apply()
+ self.posted = True
+ debug('task_gen: posted %s', self.name)
+
+ def get_hook(self, ext):
+ try: return self.mappings[ext]
+ except KeyError:
+ try: return task_gen.mappings[ext]
+ except KeyError: return None
+
+ # TODO waf 1.6: always set the environment
+ # TODO waf 1.6: create_task(self, name, inputs, outputs)
+ def create_task(self, name, src=None, tgt=None, env=None):
+ env = env or self.env
+ task = Task.TaskBase.classes[name](env.copy(), generator=self)
+ if src:
+ task.set_inputs(src)
+ if tgt:
+ task.set_outputs(tgt)
+ self.tasks.append(task)
+ return task
+
+ def name_to_obj(self, name):
+ return self.bld.name_to_obj(name, self.env)
+
+ def find_sources_in_dirs(self, dirnames, excludes=[], exts=[]):
+ """
+ The attributes "excludes" and "exts" must be lists, to avoid confusing
+ find_sources_in_dirs('a', 'b', 'c') with find_sources_in_dirs('a b c')
+
+ do not use absolute paths
+ do not use paths outside of the source tree
+ files or folders whose names begin with . are not returned
+
+ # TODO: remove in Waf 1.6
+ """
+
+ err_msg = "'%s' attribute must be a list"
+ if not isinstance(excludes, list):
+ raise Utils.WscriptError(err_msg % 'excludes')
+ if not isinstance(exts, list):
+ raise Utils.WscriptError(err_msg % 'exts')
+
+ lst = []
+
+ # make sure dirnames is a list; this helps with dirnames containing spaces
+ dirnames = self.to_list(dirnames)
+
+ ext_lst = exts or list(self.mappings.keys()) + list(task_gen.mappings.keys())
+
+ for name in dirnames:
+ anode = self.path.find_dir(name)
+
+ if not anode or not anode.is_child_of(self.bld.srcnode):
+ raise Utils.WscriptError("Unable to use '%s' - either because it's not a relative path" \
+ ", or it's not a child of '%s'." % (name, self.bld.srcnode))
+
+ self.bld.rescan(anode)
+ for name in self.bld.cache_dir_contents[anode.id]:
+
+ # ignore hidden files
+ if name.startswith('.'):
+ continue
+
+ (base, ext) = os.path.splitext(name)
+ if ext in ext_lst and not name in lst and not name in excludes:
+ lst.append((anode.relpath_gen(self.path) or '.') + os.path.sep + name)
+
+ lst.sort()
+ self.source = self.to_list(self.source)
+ if not self.source: self.source = lst
+ else: self.source += lst
+
+ def clone(self, env):
+ """when creating a clone in a task generator method,
+ make sure to set posted=False on the clone
+ else the other task generator will not create its tasks"""
+ newobj = task_gen(bld=self.bld)
+ for x in self.__dict__:
+ if x in ['env', 'bld']:
+ continue
+ elif x in ["path", "features"]:
+ setattr(newobj, x, getattr(self, x))
+ else:
+ setattr(newobj, x, copy.copy(getattr(self, x)))
+
+ newobj.__class__ = self.__class__
+ if isinstance(env, str):
+ newobj.env = self.bld.all_envs[env].copy()
+ else:
+ newobj.env = env.copy()
+
+ return newobj
+
+ def get_inst_path(self):
+ return getattr(self, '_install_path', getattr(self, 'default_install_path', ''))
+
+ def set_inst_path(self, val):
+ self._install_path = val
+
+ install_path = property(get_inst_path, set_inst_path)
+
+
+ def get_chmod(self):
+ return getattr(self, '_chmod', getattr(self, 'default_chmod', O644))
+
+ def set_chmod(self, val):
+ self._chmod = val
+
+ chmod = property(get_chmod, set_chmod)
+
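+# Example (names illustrative): task generators are normally created through
+# the build context in a wscript rather than instantiated directly:
+#   bld(features='cc cprogram', source='main.c', target='app')
+# the keyword arguments become instance attributes, and the tasks are only
+# created when the generator is posted (see post() above)
+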
+def declare_extension(var, func):
+ try:
+ for x in Utils.to_list(var):
+ task_gen.mappings[x] = func
+ except:
+ raise Utils.WscriptError('declare_extension takes either a list or a string %r' % var)
+ task_gen.mapped[func.__name__] = func
+
+def declare_order(*k):
+ assert(len(k) > 1)
+ n = len(k) - 1
+ for i in xrange(n):
+ f1 = k[i]
+ f2 = k[i+1]
+ if not f1 in task_gen.prec[f2]:
+ task_gen.prec[f2].append(f1)
+
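+# Example (method names illustrative): declare_order('apply_foo', 'apply_bar')
+# records that apply_foo must run before apply_bar when both are scheduled;
+# consecutive arguments form before/after pairs
+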
+def declare_chain(name='', action='', ext_in='', ext_out='', reentrant=True, color='BLUE',
+ install=0, before=[], after=[], decider=None, rule=None, scan=None):
+ """
+ see Tools/flex.py for an example
+ while I do not like such wrappers, some people really do
+ """
+
+ action = action or rule
+ if isinstance(action, str):
+ act = Task.simple_task_type(name, action, color=color)
+ else:
+ act = Task.task_type_from_func(name, action, color=color)
+ act.ext_in = tuple(Utils.to_list(ext_in))
+ act.ext_out = tuple(Utils.to_list(ext_out))
+ act.before = Utils.to_list(before)
+ act.after = Utils.to_list(after)
+ act.scan = scan
+
+ def x_file(self, node):
+ if decider:
+ ext = decider(self, node)
+ else:
+ ext = ext_out
+
+ if isinstance(ext, str):
+ out_source = node.change_ext(ext)
+ if reentrant:
+ self.allnodes.append(out_source)
+ elif isinstance(ext, list):
+ out_source = [node.change_ext(x) for x in ext]
+ if reentrant:
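+ # reentrant may be True (re-process every output) or an int n (only the first n)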
+ for i in xrange((reentrant is True) and len(out_source) or reentrant):
+ self.allnodes.append(out_source[i])
+ else:
+ # XXX: useless: it will fail on Utils.to_list above...
+ raise Utils.WafError("do not know how to process %s" % str(ext))
+
+ tsk = self.create_task(name, node, out_source)
+
+ if node.__class__.bld.is_install:
+ tsk.install = install
+
+ declare_extension(act.ext_in, x_file)
+ return x_file
+
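+# Example (a sketch, not the real flex tool - it assumes a FLEX variable was
+# configured): a chain turning .l files into .c files that get compiled too:
+#   declare_chain(name='flex', rule='${FLEX} -o${TGT} ${SRC}',
+#                 ext_in='.l', ext_out='.c', reentrant=True)
+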
+def bind_feature(name, methods):
+ lst = Utils.to_list(methods)
+ task_gen.traits[name].update(lst)
+
+"""
+All the following decorators are registration decorators, i.e. they add to the current class
+ (task_gen and its derivatives) an attribute with the same name as func, which points to func itself.
+For example:
+   @taskgen
+   def sayHi(self):
+      print("hi")
+Now task_gen.sayHi() may be called on any task generator instance
+
+If Python were really smart, it could infer the order of the methods by itself by looking at the
+attributes: a prerequisite for executing a method is to have the attribute set beforehand.
+Intelligent compilers binding aspect-oriented programming and parallelization - what a nice topic for studies.
+"""
+def taskgen(func):
+ """
+ register a method as a task generator method
+ """
+ setattr(task_gen, func.__name__, func)
+ return func
+
+def feature(*k):
+ """
+ declare a task generator method that will be executed when the
+ object attribute 'features' contains the corresponding key(s)
+ """
+ def deco(func):
+ setattr(task_gen, func.__name__, func)
+ for name in k:
+ task_gen.traits[name].update([func.__name__])
+ return func
+ return deco
+
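+# Example (names illustrative): bind a method to a feature and trigger it:
+#   @feature('hello')
+#   def say_hello(self):
+#       print('hello from %s' % self.name)
+# a wscript call like bld(features='hello') will then run say_hello at post time
+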
+def before(*k):
+ """
+ declare a task generator method which will be executed
+ before the functions of given name(s)
+ """
+ def deco(func):
+ setattr(task_gen, func.__name__, func)
+ for fun_name in k:
+ if not func.__name__ in task_gen.prec[fun_name]:
+ task_gen.prec[fun_name].append(func.__name__)
+ return func
+ return deco
+
+def after(*k):
+ """
+ declare a task generator method which will be executed
+ after the functions of given name(s)
+ """
+ def deco(func):
+ setattr(task_gen, func.__name__, func)
+ for fun_name in k:
+ if not fun_name in task_gen.prec[func.__name__]:
+ task_gen.prec[func.__name__].append(fun_name)
+ return func
+ return deco
+
+def extension(var):
+ """
+ declare a task generator method which will be invoked during
+ the processing of source files for the extension given
+ """
+ def deco(func):
+ setattr(task_gen, func.__name__, func)
+ try:
+ for x in Utils.to_list(var):
+ task_gen.mappings[x] = func
+ except:
+ raise Utils.WafError('extension takes either a list or a string %r' % var)
+ task_gen.mapped[func.__name__] = func
+ return func
+ return deco
+
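+# Example (a sketch - the 'capnp' task type is hypothetical): an extension
+# hook receives the generator and the node, and usually creates a task:
+#   @extension('.capnp')
+#   def process_capnp(self, node):
+#       tsk = self.create_task('capnp', node, node.change_ext('.c'))
+#       self.allnodes.append(tsk.outputs[0])  # compile the generated .c too
+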
+# TODO make certain the decorators may be used here
+
+def apply_core(self):
+ """Process the attribute source
+ transform the names into file nodes
+ try to process the files by name first, later by extension"""
+ # get the list of folders to use by the scanners
+ # all our objects share the same include paths anyway
+ find_resource = self.path.find_resource
+
+ for filename in self.to_list(self.source):
+ # if self.mappings or task_gen.mappings contains a file of the same name
+ x = self.get_hook(filename)
+ if x:
+ x(self, filename)
+ else:
+ node = find_resource(filename)
+ if not node: raise Utils.WafError("source not found: '%s' in '%s'" % (filename, str(self.path)))
+ self.allnodes.append(node)
+
+ for node in self.allnodes:
+ # self.mappings or task_gen.mappings map the file extension to a function
+ x = self.get_hook(node.suffix())
+
+ if not x:
+ raise Utils.WafError("Cannot guess how to process %s (got mappings %r in %r) -> try conf.check_tool(..)?" % \
+ (str(node), self.__class__.mappings.keys(), self.__class__))
+ x(self, node)
+feature('*')(apply_core)
+
+def exec_rule(self):
+ """Process the attribute rule; when it is provided, the method apply_core will be disabled
+ """
+ if not getattr(self, 'rule', None):
+ return
+
+ # someone may have removed it already
+ try:
+ self.meths.remove('apply_core')
+ except ValueError:
+ pass
+
+ # get the function and the variables
+ func = self.rule
+
+ vars2 = []
+ if isinstance(func, str):
+ # use the shell by default for user-defined commands
+ (func, vars2) = Task.compile_fun('', self.rule, shell=getattr(self, 'shell', True))
+ func.code = self.rule
+
+ # create the task class
+ name = getattr(self, 'name', None) or self.target or self.rule
+ if not isinstance(name, str):
+ name = str(self.idx)
+ cls = Task.task_type_from_func(name, func, getattr(self, 'vars', vars2))
+ cls.color = getattr(self, 'color', 'BLUE')
+
+ # now create one instance
+ tsk = self.create_task(name)
+
+ dep_vars = getattr(self, 'dep_vars', ['ruledeps'])
+ if dep_vars:
+ tsk.dep_vars = dep_vars
+ if isinstance(self.rule, str):
+ tsk.env.ruledeps = self.rule
+ else:
+ # only works if the function is in a global module such as a waf tool
+ tsk.env.ruledeps = Utils.h_fun(self.rule)
+
+ # we assume that the user knows what to expect when there are no inputs or outputs
+ #if not getattr(self, 'target', None) and not getattr(self, 'source', None):
+ # cls.quiet = True
+
+ if getattr(self, 'target', None):
+ cls.quiet = True
+ tsk.outputs = [self.path.find_or_declare(x) for x in self.to_list(self.target)]
+
+ if getattr(self, 'source', None):
+ cls.quiet = True
+ tsk.inputs = []
+ for x in self.to_list(self.source):
+ y = self.path.find_resource(x)
+ if not y:
+ raise Utils.WafError('input file %r could not be found (%r)' % (x, self.path.abspath()))
+ tsk.inputs.append(y)
+
+ if self.allnodes:
+ tsk.inputs.extend(self.allnodes)
+
+ if getattr(self, 'scan', None):
+ cls.scan = self.scan
+
+ if getattr(self, 'install_path', None):
+ tsk.install_path = self.install_path
+
+ if getattr(self, 'cwd', None):
+ tsk.cwd = self.cwd
+
+ if getattr(self, 'on_results', None) or getattr(self, 'update_outputs', None):
+ Task.update_outputs(cls)
+
+ if getattr(self, 'always', None):
+ Task.always_run(cls)
+
+ for x in ['after', 'before', 'ext_in', 'ext_out']:
+ setattr(cls, x, getattr(self, x, []))
+feature('*')(exec_rule)
+before('apply_core')(exec_rule)
+
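+# Example (file names illustrative): thanks to exec_rule, ad-hoc commands need
+# no task class or extension hook; in a wscript:
+#   bld(rule='cp ${SRC} ${TGT}', source='foo.in', target='foo.out')
+# the rule string is compiled by Task.compile_fun and executed through the shell
+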
+def sequence_order(self):
+ """
+ add a strict sequential constraint between the tasks generated by task generators
+ it uses the fact that task generators are posted in order
+ it will not post objects which belong to other folders
+ there is also an awesome trick for executing the method in last position
+
+ to use:
+ bld(features='javac seq')
+ bld(features='jar seq')
+
+ to start a new sequence, set the attribute seq_start, for example:
+ obj.seq_start = True
+ """
+ if self.meths and self.meths[-1] != 'sequence_order':
+ self.meths.append('sequence_order')
+ return
+
+ if getattr(self, 'seq_start', None):
+ return
+
+ # all the tasks previously declared must be run before these
+ if getattr(self.bld, 'prev', None):
+ self.bld.prev.post()
+ for x in self.bld.prev.tasks:
+ for y in self.tasks:
+ y.set_run_after(x)
+
+ self.bld.prev = self
+
+feature('seq')(sequence_order)
diff --git a/third_party/waf/wafadmin/Tools/__init__.py b/third_party/waf/wafadmin/Tools/__init__.py
new file mode 100644
index 0000000..8f026e1
--- /dev/null
+++ b/third_party/waf/wafadmin/Tools/__init__.py
@@ -0,0 +1,3 @@
+#!/usr/bin/env python
+# encoding: utf-8
+# Thomas Nagy, 2006 (ita)
diff --git a/third_party/waf/wafadmin/Tools/ar.py b/third_party/waf/wafadmin/Tools/ar.py
new file mode 100644
index 0000000..3571670
--- /dev/null
+++ b/third_party/waf/wafadmin/Tools/ar.py
@@ -0,0 +1,34 @@
+#!/usr/bin/env python
+# encoding: utf-8
+# Thomas Nagy, 2006-2008 (ita)
+# Ralf Habacker, 2006 (rh)
+
+"ar and ranlib"
+
+import os, sys
+import Task, Utils
+from Configure import conftest
+
+ar_str = '${AR} ${ARFLAGS} ${AR_TGT_F}${TGT} ${AR_SRC_F}${SRC}'
+cls = Task.simple_task_type('static_link', ar_str, color='YELLOW', ext_in='.o', ext_out='.bin', shell=False)
+cls.maxjobs = 1
+cls.install = Utils.nada
+
+# remove the output in case it already exists
+old = cls.run
+def wrap(self):
+ try: os.remove(self.outputs[0].abspath(self.env))
+ except OSError: pass
+ return old(self)
+setattr(cls, 'run', wrap)
+
+def detect(conf):
+ conf.find_program('ar', var='AR')
+ conf.find_program('ranlib', var='RANLIB')
+ conf.env.ARFLAGS = 'rcs'
+
+@conftest
+def find_ar(conf):
+ v = conf.env
+ conf.check_tool('ar')
+ if not v['AR']: conf.fatal('ar is required for static libraries - not found')
diff --git a/third_party/waf/wafadmin/Tools/bison.py b/third_party/waf/wafadmin/Tools/bison.py
new file mode 100644
index 0000000..c281e61
--- /dev/null
+++ b/third_party/waf/wafadmin/Tools/bison.py
@@ -0,0 +1,37 @@
+#!/usr/bin/env python
+# encoding: utf-8
+# John O'Meara, 2006
+# Thomas Nagy 2009
+
+"Bison processing"
+
+import Task
+from TaskGen import extension
+
+bison = '${BISON} ${BISONFLAGS} ${SRC[0].abspath()} -o ${TGT[0].name}'
+cls = Task.simple_task_type('bison', bison, 'GREEN', ext_in='.yc .y .yy', ext_out='.c .cxx .h .l', shell=False)
+
+@extension(['.y', '.yc', '.yy'])
+def big_bison(self, node):
+ """when things get complicated (unlike flex), the old recipes work better (setting cwd)"""
+ has_h = '-d' in self.env['BISONFLAGS']
+
+ outs = []
+ if node.name.endswith('.yc'):
+ outs.append(node.change_ext('.tab.cc'))
+ if has_h:
+ outs.append(node.change_ext('.tab.hh'))
+ else:
+ outs.append(node.change_ext('.tab.c'))
+ if has_h:
+ outs.append(node.change_ext('.tab.h'))
+
+ tsk = self.create_task('bison', node, outs)
+ tsk.cwd = node.bld_dir(tsk.env)
+
+ # and the c/cxx file must be compiled too
+ self.allnodes.append(outs[0])
+
+def detect(conf):
+ bison = conf.find_program('bison', var='BISON', mandatory=True)
+ conf.env['BISONFLAGS'] = '-d'
diff --git a/third_party/waf/wafadmin/Tools/cc.py b/third_party/waf/wafadmin/Tools/cc.py
new file mode 100644
index 0000000..e54df47
--- /dev/null
+++ b/third_party/waf/wafadmin/Tools/cc.py
@@ -0,0 +1,99 @@
+#!/usr/bin/env python
+# encoding: utf-8
+# Thomas Nagy, 2006 (ita)
+
+"Base for c programs/libraries"
+
+import os
+import TaskGen, Build, Utils, Task
+from Logs import debug
+import ccroot
+from TaskGen import feature, before, extension, after
+
+g_cc_flag_vars = [
+'CCDEPS', 'FRAMEWORK', 'FRAMEWORKPATH',
+'STATICLIB', 'LIB', 'LIBPATH', 'LINKFLAGS', 'RPATH',
+'CCFLAGS', 'CPPPATH', 'CPPFLAGS', 'CCDEFINES']
+
+EXT_CC = ['.c']
+
+g_cc_type_vars = ['CCFLAGS', 'LINKFLAGS']
+
+# TODO remove in waf 1.6
+class cc_taskgen(ccroot.ccroot_abstract):
+ pass
+
+@feature('cc')
+@before('apply_type_vars')
+@after('default_cc')
+def init_cc(self):
+ self.p_flag_vars = set(self.p_flag_vars).union(g_cc_flag_vars)
+ self.p_type_vars = set(self.p_type_vars).union(g_cc_type_vars)
+
+ if not self.env['CC_NAME']:
+ raise Utils.WafError("At least one compiler (gcc, ..) must be selected")
+
+@feature('cc')
+@after('apply_incpaths')
+def apply_obj_vars_cc(self):
+ """after apply_incpaths for INC_PATHS"""
+ env = self.env
+ app = env.append_unique
+ cpppath_st = env['CPPPATH_ST']
+
+ # local flags come first
+ # set the user-defined includes paths
+ for i in env['INC_PATHS']:
+ app('_CCINCFLAGS', cpppath_st % i.bldpath(env))
+ app('_CCINCFLAGS', cpppath_st % i.srcpath(env))
+
+ # set the library include paths
+ for i in env['CPPPATH']:
+ app('_CCINCFLAGS', cpppath_st % i)
+
+@feature('cc')
+@after('apply_lib_vars')
+def apply_defines_cc(self):
+ """after uselib is set for CCDEFINES"""
+ self.defines = getattr(self, 'defines', [])
+ lst = self.to_list(self.defines) + self.to_list(self.env['CCDEFINES'])
+ milst = []
+
+ # now process the local defines
+ for defi in lst:
+ if not defi in milst:
+ milst.append(defi)
+
+ # CCDEFINES_
+ libs = self.to_list(self.uselib)
+ for l in libs:
+ val = self.env['CCDEFINES_'+l]
+ if val: milst += val
+ self.env['DEFLINES'] = ["%s %s" % (x[0], Utils.trimquotes('='.join(x[1:]))) for x in [y.split('=') for y in milst]]
+ y = self.env['CCDEFINES_ST']
+ self.env.append_unique('_CCDEFFLAGS', [y%x for x in milst])
+
+@extension(EXT_CC)
+def c_hook(self, node):
+ # create the compilation task: cpp or cc
+ if getattr(self, 'obj_ext', None):
+ obj_ext = self.obj_ext
+ else:
+ obj_ext = '_%d.o' % self.idx
+
+ task = self.create_task('cc', node, node.change_ext(obj_ext))
+ try:
+ self.compiled_tasks.append(task)
+ except AttributeError:
+ raise Utils.WafError('Have you forgotten to set the feature "cc" on %s?' % str(self))
+ return task
+
+cc_str = '${CC} ${CCFLAGS} ${CPPFLAGS} ${_CCINCFLAGS} ${_CCDEFFLAGS} ${CC_SRC_F}${SRC} ${CC_TGT_F}${TGT}'
+cls = Task.simple_task_type('cc', cc_str, 'GREEN', ext_out='.o', ext_in='.c', shell=False)
+cls.scan = ccroot.scan
+cls.vars.append('CCDEPS')
+
+link_str = '${LINK_CC} ${CCLNK_SRC_F}${SRC} ${CCLNK_TGT_F}${TGT[0].abspath(env)} ${LINKFLAGS}'
+cls = Task.simple_task_type('cc_link', link_str, color='YELLOW', ext_in='.o', ext_out='.bin', shell=False)
+cls.maxjobs = 1
+cls.install = Utils.nada
diff --git a/third_party/waf/wafadmin/Tools/ccroot.py b/third_party/waf/wafadmin/Tools/ccroot.py
new file mode 100644
index 0000000..c130b40
--- /dev/null
+++ b/third_party/waf/wafadmin/Tools/ccroot.py
@@ -0,0 +1,639 @@
+#!/usr/bin/env python
+# encoding: utf-8
+# Thomas Nagy, 2005-2008 (ita)
+
+"base for all c/c++ programs and libraries"
+
+import os, sys, re
+import TaskGen, Task, Utils, preproc, Logs, Build, Options
+from Logs import error, debug, warn
+from Utils import md5
+from TaskGen import taskgen, after, before, feature
+from Constants import *
+from Configure import conftest
+try:
+ from cStringIO import StringIO
+except ImportError:
+ from io import StringIO
+
+import config_c # <- necessary for the configuration, do not touch
+
+USE_TOP_LEVEL = False
+
+def get_cc_version(conf, cc, gcc=False, icc=False):
+
+ cmd = cc + ['-dM', '-E', '-']
+ try:
+ p = Utils.pproc.Popen(cmd, stdin=Utils.pproc.PIPE, stdout=Utils.pproc.PIPE, stderr=Utils.pproc.PIPE)
+ p.stdin.write('\n')
+ out = p.communicate()[0]
+ except:
+ conf.fatal('could not determine the compiler version %r' % cmd)
+
+ # PY3K: do not touch
+ out = str(out)
+
+ if gcc:
+ if out.find('__INTEL_COMPILER') >= 0:
+ conf.fatal('The intel compiler pretends to be gcc')
+ if out.find('__GNUC__') < 0:
+ conf.fatal('Could not determine the compiler type')
+
+ if icc and out.find('__INTEL_COMPILER') < 0:
+ conf.fatal('Not icc/icpc')
+
+ k = {}
+ if icc or gcc:
+ out = out.split('\n')
+ import shlex
+
+ for line in out:
+ lst = shlex.split(line)
+ if len(lst)>2:
+ key = lst[1]
+ val = lst[2]
+ k[key] = val
+
+ def isD(var):
+ return var in k
+
+ def isT(var):
+ return var in k and k[var] != '0'
+
+ # Some documentation is available at http://predef.sourceforge.net
+ # The names given to DEST_OS must match what Utils.unversioned_sys_platform() returns.
+ mp1 = {
+ '__linux__' : 'linux',
+ '__GNU__' : 'gnu',
+ '__FreeBSD__' : 'freebsd',
+ '__NetBSD__' : 'netbsd',
+ '__OpenBSD__' : 'openbsd',
+ '__sun' : 'sunos',
+ '__hpux' : 'hpux',
+ '__sgi' : 'irix',
+ '_AIX' : 'aix',
+ '__CYGWIN__' : 'cygwin',
+ '__MSYS__' : 'msys',
+ '_UWIN' : 'uwin',
+ '_WIN64' : 'win32',
+ '_WIN32' : 'win32',
+ '__POWERPC__' : 'powerpc',
+ }
+
+ for i in mp1:
+ if isD(i):
+ conf.env.DEST_OS = mp1[i]
+ break
+ else:
+ if isD('__APPLE__') and isD('__MACH__'):
+ conf.env.DEST_OS = 'darwin'
+ elif isD('__unix__'): # unix must be tested last as it's a generic fallback
+ conf.env.DEST_OS = 'generic'
+
+ if isD('__ELF__'):
+ conf.env.DEST_BINFMT = 'elf'
+ elif isD('__WINNT__') or isD('__CYGWIN__'):
+ conf.env.DEST_BINFMT = 'pe'
+ elif isD('__APPLE__'):
+ conf.env.DEST_BINFMT = 'mac-o'
+
+ mp2 = {
+ '__x86_64__' : 'x86_64',
+ '__i386__' : 'x86',
+ '__ia64__' : 'ia',
+ '__mips__' : 'mips',
+ '__sparc__' : 'sparc',
+ '__alpha__' : 'alpha',
+ '__arm__' : 'arm',
+ '__hppa__' : 'hppa',
+ '__powerpc__' : 'powerpc',
+ }
+ for i in mp2:
+ if isD(i):
+ conf.env.DEST_CPU = mp2[i]
+ break
+
+ debug('ccroot: dest platform: ' + ' '.join([conf.env[x] or '?' for x in ('DEST_OS', 'DEST_BINFMT', 'DEST_CPU')]))
+ conf.env['CC_VERSION'] = (k['__GNUC__'], k['__GNUC_MINOR__'], k['__GNUC_PATCHLEVEL__'])
+ return k
+
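+# Example (sketch): compiler tools call this while configuring; the gcc tool
+# does roughly the following, cc being the compiler command list:
+#   ccroot.get_cc_version(conf, cc, gcc=True)
+# which fills conf.env.DEST_OS, DEST_BINFMT, DEST_CPU and CC_VERSION
+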
+class DEBUG_LEVELS:
+ """Will disappear in waf 1.6"""
+ ULTRADEBUG = "ultradebug"
+ DEBUG = "debug"
+ RELEASE = "release"
+ OPTIMIZED = "optimized"
+ CUSTOM = "custom"
+
+ ALL = [ULTRADEBUG, DEBUG, RELEASE, OPTIMIZED, CUSTOM]
+
+def scan(self):
+ "look for the .h files the .c/.cpp files need"
+ debug('ccroot: _scan_preprocessor(self, node, env, path_lst)')
+
+ # TODO waf 1.6 - assume the default input has exactly one file
+
+ if len(self.inputs) == 1:
+ node = self.inputs[0]
+ (nodes, names) = preproc.get_deps(node, self.env, nodepaths = self.env['INC_PATHS'])
+ if Logs.verbose:
+ debug('deps: deps for %s: %r; unresolved %r', str(node), nodes, names)
+ return (nodes, names)
+
+ all_nodes = []
+ all_names = []
+ seen = set()
+ for node in self.inputs:
+ (nodes, names) = preproc.get_deps(node, self.env, nodepaths = self.env['INC_PATHS'])
+ if Logs.verbose:
+ debug('deps: deps for %s: %r; unresolved %r', str(node), nodes, names)
+ for x in nodes:
+ if id(x) in seen: continue
+ seen.add(id(x))
+ all_nodes.append(x)
+ for x in names:
+ if not x in all_names:
+ all_names.append(x)
+ return (all_nodes, all_names)
+
+class ccroot_abstract(TaskGen.task_gen):
+ "Parent class for programs and libraries in languages c, c++ and moc (Qt)"
+ def __init__(self, *k, **kw):
+ # COMPAT remove in waf 1.6 TODO
+ if len(k) > 1:
+ k = list(k)
+ if k[1][0] != 'c':
+ k[1] = 'c' + k[1]
+ TaskGen.task_gen.__init__(self, *k, **kw)
+
+def get_target_name(self):
+ tp = 'program'
+ for x in self.features:
+ if x in ['cshlib', 'cstaticlib']:
+ tp = x.lstrip('c')
+
+ pattern = self.env[tp + '_PATTERN']
+ if not pattern: pattern = '%s'
+
+ dir, name = os.path.split(self.target)
+
+ if 'cshlib' in self.features and getattr(self, 'vnum', None):
+ nums = self.vnum.split('.')
+ if self.env.DEST_BINFMT == 'pe':
+ # include the version in the dll file name,
+ # the import lib file name stays unversioned.
+ name = name + '-' + nums[0]
+ elif self.env.DEST_OS == 'openbsd':
+ pattern = '%s.%s' % (pattern, nums[0])
+ if len(nums) >= 2:
+ pattern += '.%s' % nums[1]
+
+ return os.path.join(dir, pattern % name)
+
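+# Example: with env['shlib_PATTERN'] = 'lib%s.so' (as the gcc tool sets it),
+# a generator with features 'cc cshlib' and target='foo' links 'libfoo.so';
+# on DEST_BINFMT == 'pe', vnum='1.2.3' puts the major number in the dll name
+# instead ('foo-1.dll' with a '%s.dll' pattern)
+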
+@feature('cc', 'cxx')
+@before('apply_core')
+def default_cc(self):
+ """compiled_tasks attribute must be set before the '.c->.o' tasks can be created"""
+ Utils.def_attrs(self,
+ includes = '',
+ defines= '',
+ rpaths = '',
+ uselib = '',
+ uselib_local = '',
+ add_objects = '',
+ p_flag_vars = [],
+ p_type_vars = [],
+ compiled_tasks = [],
+ link_task = None)
+
+ # The only thing we need for cross-compilation is DEST_BINFMT.
+ # At some point, we may reach a case where DEST_BINFMT is not enough, but for now it's sufficient.
+ # Currently, cross-compilation is auto-detected only for the gnu and intel compilers.
+ if not self.env.DEST_BINFMT:
+ # Infer the binary format from the os name.
+ self.env.DEST_BINFMT = Utils.unversioned_sys_platform_to_binary_format(
+ self.env.DEST_OS or Utils.unversioned_sys_platform())
+
+ if not self.env.BINDIR: self.env.BINDIR = Utils.subst_vars('${PREFIX}/bin', self.env)
+ if not self.env.LIBDIR: self.env.LIBDIR = Utils.subst_vars('${PREFIX}/lib${LIB_EXT}', self.env)
+
+@feature('cprogram', 'dprogram', 'cstaticlib', 'dstaticlib', 'cshlib', 'dshlib')
+def apply_verif(self):
+ """no particular order, used for diagnostics"""
+ if not (self.source or getattr(self, 'add_objects', None) or getattr(self, 'uselib_local', None) or getattr(self, 'obj_files', None)):
+ raise Utils.WafError('no source files specified for %s' % self)
+ if not self.target:
+ raise Utils.WafError('no target for %s' % self)
+
+# TODO reference the d programs, shlibs in d.py, not here
+
+@feature('cprogram', 'dprogram')
+@after('default_cc')
+@before('apply_core')
+def vars_target_cprogram(self):
+ self.default_install_path = self.env.BINDIR
+ self.default_chmod = O755
+
+@after('default_cc')
+@feature('cshlib', 'dshlib')
+@before('apply_core')
+def vars_target_cshlib(self):
+ if self.env.DEST_BINFMT == 'pe':
+ # set execute bit on libs to avoid 'permission denied' (issue 283)
+ self.default_chmod = O755
+ self.default_install_path = self.env.BINDIR
+ else:
+ self.default_install_path = self.env.LIBDIR
+
+@feature('cprogram', 'dprogram', 'cstaticlib', 'dstaticlib', 'cshlib', 'dshlib')
+@after('apply_link', 'vars_target_cprogram', 'vars_target_cshlib')
+def default_link_install(self):
+ """you may remove this method to inject your own installation for the first element;
+ any other install should only process its own nodes and not those of the others"""
+ if self.install_path:
+ self.bld.install_files(self.install_path, self.link_task.outputs[0], env=self.env, chmod=self.chmod)
+
+@feature('cc', 'cxx')
+@after('apply_type_vars', 'apply_lib_vars', 'apply_core')
+def apply_incpaths(self):
+ """used by the scanner
+ after processing the uselib for CPPPATH
+ after apply_core because some processing may add include paths
+ """
+ lst = []
+ # TODO move the uselib processing out of here
+ for lib in self.to_list(self.uselib):
+ for path in self.env['CPPPATH_' + lib]:
+ if not path in lst:
+ lst.append(path)
+ if preproc.go_absolute:
+ for path in preproc.standard_includes:
+ if not path in lst:
+ lst.append(path)
+
+ for path in self.to_list(self.includes):
+ if not path in lst:
+ if preproc.go_absolute or not os.path.isabs(path):
+ lst.append(path)
+ else:
+ self.env.prepend_value('CPPPATH', path)
+
+ for path in lst:
+ node = None
+ if os.path.isabs(path):
+ if preproc.go_absolute:
+ node = self.bld.root.find_dir(path)
+ elif path[0] == '#':
+ node = self.bld.srcnode
+ if len(path) > 1:
+ node = node.find_dir(path[1:])
+ else:
+ node = self.path.find_dir(path)
+
+ if node:
+ self.env.append_value('INC_PATHS', node)
+
+ # TODO WAF 1.6
+ if USE_TOP_LEVEL:
+ self.env.append_value('INC_PATHS', self.bld.srcnode)
+
+@feature('cc', 'cxx')
+@after('init_cc', 'init_cxx')
+@before('apply_lib_vars')
+def apply_type_vars(self):
+ """before apply_lib_vars because we modify uselib
+ after init_cc and init_cxx because we need p_type_vars
+ """
+ for x in self.features:
+ if not x in ['cprogram', 'cstaticlib', 'cshlib']:
+ continue
+ x = x.lstrip('c')
+
+ # if the type defines uselib to add, add them
+ st = self.env[x + '_USELIB']
+ if st: self.uselib = self.uselib + ' ' + st
+
+ # each compiler defines variables like 'shlib_CXXFLAGS', 'shlib_LINKFLAGS', etc
+ # so when we make a task generator of the type shlib, CXXFLAGS are modified accordingly
+ for var in self.p_type_vars:
+ compvar = '%s_%s' % (x, var)
+ #print compvar
+ value = self.env[compvar]
+ if value: self.env.append_value(var, value)
+
+@feature('cprogram', 'cshlib', 'cstaticlib')
+@after('apply_core')
+def apply_link(self):
+ """executes after apply_core for collecting 'compiled_tasks'
+ use a custom linker if specified (self.link='name-of-custom-link-task')"""
+ link = getattr(self, 'link', None)
+ if not link:
+ if 'cstaticlib' in self.features: link = 'static_link'
+ elif 'cxx' in self.features: link = 'cxx_link'
+ else: link = 'cc_link'
+
+ tsk = self.create_task(link)
+ outputs = [t.outputs[0] for t in self.compiled_tasks]
+ tsk.set_inputs(outputs)
+ tsk.set_outputs(self.path.find_or_declare(get_target_name(self)))
+
+ self.link_task = tsk
+
+@feature('cc', 'cxx')
+@after('apply_link', 'init_cc', 'init_cxx', 'apply_core')
+def apply_lib_vars(self):
+ """after apply_link because of 'link_task'
+ after default_cc because of the attribute 'uselib'"""
+
+ # after 'apply_core' for the case of 'cc' when there is no link task
+
+ env = self.env
+
+ # 1. the case of the libs defined in the project (visit ancestors first)
+ # the ancestors' external libraries (uselib) will be prepended
+ self.uselib = self.to_list(self.uselib)
+ names = self.to_list(self.uselib_local)
+
+ seen = set([])
+ tmp = Utils.deque(names) # consume a copy of the list of names
+ while tmp:
+ lib_name = tmp.popleft()
+ # visit dependencies only once
+ if lib_name in seen:
+ continue
+
+ y = self.name_to_obj(lib_name)
+ if not y:
+ raise Utils.WafError('object %r was not found in uselib_local (required by %r)' % (lib_name, self.name))
+ y.post()
+ seen.add(lib_name)
+
+ # object has ancestors to process (shared libraries): add them to the end of the list
+ if getattr(y, 'uselib_local', None):
+ lst = y.to_list(y.uselib_local)
+ if 'cshlib' in y.features or 'cprogram' in y.features:
+ lst = [x for x in lst if not 'cstaticlib' in self.name_to_obj(x).features]
+ tmp.extend(lst)
+
+ # link task and flags
+ if getattr(y, 'link_task', None):
+
+ link_name = y.target[y.target.rfind(os.sep) + 1:]
+ if 'cstaticlib' in y.features:
+ env.append_value('STATICLIB', link_name)
+ elif 'cshlib' in y.features or 'cprogram' in y.features:
+ # WARNING some linkers can link against programs
+ env.append_value('LIB', link_name)
+
+ # the order
+ self.link_task.set_run_after(y.link_task)
+
+ # for the recompilation
+ dep_nodes = getattr(self.link_task, 'dep_nodes', [])
+ self.link_task.dep_nodes = dep_nodes + y.link_task.outputs
+
+ # add the link path too
+ tmp_path = y.link_task.outputs[0].parent.bldpath(self.env)
+ if not tmp_path in env['LIBPATH']: env.prepend_value('LIBPATH', tmp_path)
+
+ # add the ancestors' uselib too - but only propagate those that have no staticlib
+ for v in self.to_list(y.uselib):
+ if not env['STATICLIB_' + v]:
+ if not v in self.uselib:
+ self.uselib.insert(0, v)
+
+ # if the library task generator provides 'export_incdirs', add to the include path
+ # the export_incdirs must be a list of paths relative to the other library
+ if getattr(y, 'export_incdirs', None):
+ for x in self.to_list(y.export_incdirs):
+ node = y.path.find_dir(x)
+ if not node:
+ raise Utils.WafError('object %r: invalid folder %r in export_incdirs' % (y.target, x))
+ self.env.append_unique('INC_PATHS', node)
+
+ # 2. the case of the libs defined outside
+ for x in self.uselib:
+ for v in self.p_flag_vars:
+ val = self.env[v + '_' + x]
+ if val: self.env.append_value(v, val)
+
+@feature('cprogram', 'cstaticlib', 'cshlib')
+@after('init_cc', 'init_cxx', 'apply_link')
+def apply_objdeps(self):
+ "add the .o files produced by other task generators, in the same manner as uselib_local"
+ if not getattr(self, 'add_objects', None): return
+
+ seen = []
+ names = self.to_list(self.add_objects)
+ while names:
+ x = names[0]
+
+ # visit dependencies only once
+ if x in seen:
+ names = names[1:]
+ continue
+
+ # object does not exist ?
+ y = self.name_to_obj(x)
+ if not y:
+ raise Utils.WafError('object %r was not found in uselib_local (required by add_objects %r)' % (x, self.name))
+
+ # object has ancestors to process first ? update the list of names
+ if getattr(y, 'add_objects', None):
+ added = 0
+ lst = y.to_list(y.add_objects)
+ lst.reverse()
+ for u in lst:
+ if u in seen: continue
+ added = 1
+ names = [u]+names
+ if added: continue # list of names modified, loop
+
+ # safe to process the current object
+ y.post()
+ seen.append(x)
+
+ for t in y.compiled_tasks:
+ self.link_task.inputs.extend(t.outputs)
+
+@feature('cprogram', 'cshlib', 'cstaticlib')
+@after('apply_lib_vars')
+def apply_obj_vars(self):
+ """after apply_lib_vars for uselib"""
+ v = self.env
+ lib_st = v['LIB_ST']
+ staticlib_st = v['STATICLIB_ST']
+ libpath_st = v['LIBPATH_ST']
+ staticlibpath_st = v['STATICLIBPATH_ST']
+ rpath_st = v['RPATH_ST']
+
+ app = v.append_unique
+
+ if v['FULLSTATIC']:
+ v.append_value('LINKFLAGS', v['FULLSTATIC_MARKER'])
+
+ for i in v['RPATH']:
+ if i and rpath_st:
+ app('LINKFLAGS', rpath_st % i)
+
+ for i in v['LIBPATH']:
+ app('LINKFLAGS', libpath_st % i)
+ app('LINKFLAGS', staticlibpath_st % i)
+
+ if v['STATICLIB']:
+ v.append_value('LINKFLAGS', v['STATICLIB_MARKER'])
+ k = [(staticlib_st % i) for i in v['STATICLIB']]
+ app('LINKFLAGS', k)
+
+ # fully static binaries ?
+ if not v['FULLSTATIC']:
+ if v['STATICLIB'] or v['LIB']:
+ v.append_value('LINKFLAGS', v['SHLIB_MARKER'])
+
+ app('LINKFLAGS', [lib_st % i for i in v['LIB']])
+
+@after('apply_link')
+def process_obj_files(self):
+ if not hasattr(self, 'obj_files'): return
+ for x in self.obj_files:
+ node = self.path.find_resource(x)
+ self.link_task.inputs.append(node)
+
+@taskgen
+def add_obj_file(self, file):
+ """Small example on how to link object files as if they were source
+ obj = bld.create_obj('cc')
+ obj.add_obj_file('foo.o')"""
+ if not hasattr(self, 'obj_files'): self.obj_files = []
+ if not 'process_obj_files' in self.meths: self.meths.append('process_obj_files')
+ self.obj_files.append(file)
+
+c_attrs = {
+'cxxflag' : 'CXXFLAGS',
+'cflag' : 'CCFLAGS',
+'ccflag' : 'CCFLAGS',
+'linkflag' : 'LINKFLAGS',
+'ldflag' : 'LINKFLAGS',
+'lib' : 'LIB',
+'libpath' : 'LIBPATH',
+'staticlib': 'STATICLIB',
+'staticlibpath': 'STATICLIBPATH',
+'rpath' : 'RPATH',
+'framework' : 'FRAMEWORK',
+'frameworkpath' : 'FRAMEWORKPATH'
+}
+
+@feature('cc', 'cxx')
+@before('init_cxx', 'init_cc')
+@before('apply_lib_vars', 'apply_obj_vars', 'apply_incpaths', 'init_cc')
+def add_extra_flags(self):
+ """case- and plural-insensitive matching of the flag attributes
+ before apply_obj_vars, for processing the library attributes
+ """
+ for x in self.__dict__.keys():
+ y = x.lower()
+ if y[-1] == 's':
+ y = y[:-1]
+ if c_attrs.get(y, None):
+ self.env.append_unique(c_attrs[y], getattr(self, x))
+
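+# Example (names illustrative): since matching is case- and plural-insensitive,
+#   bld(features='cc cprogram', source='a.c', target='a', cflags='-O2', lib='m')
+# appends '-O2' to env['CCFLAGS'] and 'm' to env['LIB'] ('cflags' matches after
+# the trailing 's' is stripped)
+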
+# ============ the code above must not know anything about import libs ==========
+
+@feature('cshlib')
+@after('apply_link', 'default_cc')
+@before('apply_lib_vars', 'apply_objdeps', 'default_link_install')
+def apply_implib(self):
+ """On mswindows, handle dlls and their import libs
+ the .dll.a is the import lib and it is required for linking so it is installed too
+ """
+ if not self.env.DEST_BINFMT == 'pe':
+ return
+
+ self.meths.remove('default_link_install')
+
+ bindir = self.install_path
+ if not bindir: return
+
+ # install the dll in the bin dir
+ dll = self.link_task.outputs[0]
+ self.bld.install_files(bindir, dll, self.env, self.chmod)
+
+ # add linker flags to generate the import lib
+ implib = self.env['implib_PATTERN'] % os.path.split(self.target)[1]
+
+ implib = dll.parent.find_or_declare(implib)
+ self.link_task.outputs.append(implib)
+ self.bld.install_as('${LIBDIR}/%s' % implib.name, implib, self.env)
+
+ self.env.append_value('LINKFLAGS', (self.env['IMPLIB_ST'] % implib.bldpath(self.env)).split())
+
+# ============ the code above must not know anything about vnum processing on unix platforms =========
+
+@feature('cshlib')
+@after('apply_link')
+@before('apply_lib_vars', 'default_link_install')
+def apply_vnum(self):
+ """
+ libfoo.so is installed as libfoo.so.1.2.3
+ """
+ if not getattr(self, 'vnum', '') or not 'cshlib' in self.features or os.name != 'posix' or self.env.DEST_BINFMT not in ('elf', 'mac-o'):
+ return
+
+ self.meths.remove('default_link_install')
+
+ link = self.link_task
+ nums = self.vnum.split('.')
+ node = link.outputs[0]
+
+ libname = node.name
+ if libname.endswith('.dylib'):
+ name3 = libname.replace('.dylib', '.%s.dylib' % self.vnum)
+ name2 = libname.replace('.dylib', '.%s.dylib' % nums[0])
+ else:
+ name3 = libname + '.' + self.vnum
+ name2 = libname + '.' + nums[0]
+
+ if self.env.SONAME_ST:
+ v = self.env.SONAME_ST % name2
+ self.env.append_value('LINKFLAGS', v.split())
+
+ bld = self.bld
+ nums = self.vnum.split('.')
+
+ path = self.install_path
+ if not path: return
+
+ if self.env.DEST_OS == 'openbsd':
+ libname = self.link_task.outputs[0].name
+ bld.install_as('%s%s%s' % (path, os.sep, libname), node, env=self.env)
+ else:
+ bld.install_as(path + os.sep + name3, node, env=self.env)
+ bld.symlink_as(path + os.sep + name2, name3)
+ bld.symlink_as(path + os.sep + libname, name3)
+
+ # the following task is just to enable execution from the build dir :-/
+ if self.env.DEST_OS != 'openbsd':
+ self.create_task('vnum', node, [node.parent.find_or_declare(name2), node.parent.find_or_declare(name3)])
+
+def exec_vnum_link(self):
+ for x in self.outputs:
+ path = x.abspath(self.env)
+ try:
+ os.remove(path)
+ except OSError:
+ pass
+
+ try:
+ os.symlink(self.inputs[0].name, path)
+ except OSError:
+ return 1
+
+cls = Task.task_type_from_func('vnum', func=exec_vnum_link, ext_in='.bin', color='CYAN')
+cls.quiet = 1
+
+# ============ the --as-needed flag should be added during the configuration, not at runtime =========
+
+@conftest
+def add_as_needed(conf):
+ if conf.env.DEST_BINFMT == 'elf' and 'gcc' in (conf.env.CXX_NAME, conf.env.CC_NAME):
+ conf.env.append_unique('LINKFLAGS', '--as-needed')
diff --git a/third_party/waf/wafadmin/Tools/compiler_cc.py b/third_party/waf/wafadmin/Tools/compiler_cc.py
new file mode 100644
index 0000000..642458a
--- /dev/null
+++ b/third_party/waf/wafadmin/Tools/compiler_cc.py
@@ -0,0 +1,66 @@
+#!/usr/bin/env python
+# encoding: utf-8
+# Matthias Jahn jahn dôt matthias ât freenet dôt de, 2007 (pmarat)
+
+import os, sys, imp, types, ccroot
+import optparse
+import Utils, Configure, Options
+from Logs import debug
+
+c_compiler = {
+ 'win32': ['msvc', 'gcc'],
+ 'cygwin': ['gcc'],
+ 'darwin': ['gcc'],
+ 'aix': ['xlc', 'gcc'],
+ 'linux': ['gcc', 'icc', 'suncc'],
+ 'sunos': ['gcc', 'suncc'],
+ 'irix': ['gcc'],
+ 'hpux': ['gcc'],
+ 'gnu': ['gcc'],
+ 'default': ['gcc']
+}
+
+def __list_possible_compiler(platform):
+ try:
+ return c_compiler[platform]
+ except KeyError:
+ return c_compiler["default"]
+
+def detect(conf):
+ """
+ for each compiler for the platform, try to configure the compiler
+ in theory the tools should raise a configuration error if the compiler
+ pretends to be something it is not (setting CC=icc and trying to configure gcc)
+ """
+ try: test_for_compiler = Options.options.check_c_compiler
+ except AttributeError: conf.fatal("Add set_options(opt): opt.tool_options('compiler_cc')")
+ orig = conf.env
+ for compiler in test_for_compiler.split():
+ conf.env = orig.copy()
+ try:
+ conf.check_tool(compiler)
+ except Configure.ConfigurationError, e:
+ debug('compiler_cc: %r' % e)
+ else:
+ if conf.env['CC']:
+ orig.table = conf.env.get_merged_dict()
+ conf.env = orig
+ conf.check_message(compiler, '', True)
+ conf.env['COMPILER_CC'] = compiler
+ break
+ conf.check_message(compiler, '', False)
+ break
+ else:
+ conf.fatal('could not configure a c compiler!')
+
+def set_options(opt):
+ build_platform = Utils.unversioned_sys_platform()
+ possible_compiler_list = __list_possible_compiler(build_platform)
+ test_for_compiler = ' '.join(possible_compiler_list)
+ cc_compiler_opts = opt.add_option_group("C Compiler Options")
+ cc_compiler_opts.add_option('--check-c-compiler', default="%s" % test_for_compiler,
+ help='On this platform (%s) the following C-Compiler will be checked by default: "%s"' % (build_platform, test_for_compiler),
+ dest="check_c_compiler")
+
+ for c_compiler in test_for_compiler.split():
+ opt.tool_options('%s' % c_compiler, option_group=cc_compiler_opts)
diff --git a/third_party/waf/wafadmin/Tools/compiler_cxx.py b/third_party/waf/wafadmin/Tools/compiler_cxx.py
new file mode 100644
index 0000000..aa0b0e7
--- /dev/null
+++ b/third_party/waf/wafadmin/Tools/compiler_cxx.py
@@ -0,0 +1,61 @@
+#!/usr/bin/env python
+# encoding: utf-8
+# Matthias Jahn jahn dôt matthias ât freenet dôt de 2007 (pmarat)
+
+import os, sys, imp, types, ccroot
+import optparse
+import Utils, Configure, Options
+from Logs import debug
+
+cxx_compiler = {
+'win32': ['msvc', 'g++'],
+'cygwin': ['g++'],
+'darwin': ['g++'],
+'aix': ['xlc++', 'g++'],
+'linux': ['g++', 'icpc', 'sunc++'],
+'sunos': ['g++', 'sunc++'],
+'irix': ['g++'],
+'hpux': ['g++'],
+'gnu': ['g++'],
+'default': ['g++']
+}
+
+def __list_possible_compiler(platform):
+ try:
+ return cxx_compiler[platform]
+ except KeyError:
+ return cxx_compiler["default"]
+
+def detect(conf):
+ try: test_for_compiler = Options.options.check_cxx_compiler
+ except AttributeError: raise Configure.ConfigurationError("Add set_options(opt): opt.tool_options('compiler_cxx')")
+ orig = conf.env
+ for compiler in test_for_compiler.split():
+ try:
+ conf.env = orig.copy()
+ conf.check_tool(compiler)
+ except Configure.ConfigurationError, e:
+ debug('compiler_cxx: %r' % e)
+ else:
+ if conf.env['CXX']:
+ orig.table = conf.env.get_merged_dict()
+ conf.env = orig
+ conf.check_message(compiler, '', True)
+ conf.env['COMPILER_CXX'] = compiler
+ break
+ conf.check_message(compiler, '', False)
+ break
+ else:
+ conf.fatal('could not configure a cxx compiler!')
+
+def set_options(opt):
+ build_platform = Utils.unversioned_sys_platform()
+ possible_compiler_list = __list_possible_compiler(build_platform)
+ test_for_compiler = ' '.join(possible_compiler_list)
+ cxx_compiler_opts = opt.add_option_group('C++ Compiler Options')
+ cxx_compiler_opts.add_option('--check-cxx-compiler', default="%s" % test_for_compiler,
+ help='On this platform (%s) the following C++ Compiler will be checked by default: "%s"' % (build_platform, test_for_compiler),
+ dest="check_cxx_compiler")
+
+ for cxx_compiler in test_for_compiler.split():
+ opt.tool_options('%s' % cxx_compiler, option_group=cxx_compiler_opts)
diff --git a/third_party/waf/wafadmin/Tools/compiler_d.py b/third_party/waf/wafadmin/Tools/compiler_d.py
new file mode 100644
index 0000000..378277a
--- /dev/null
+++ b/third_party/waf/wafadmin/Tools/compiler_d.py
@@ -0,0 +1,32 @@
+#!/usr/bin/env python
+# encoding: utf-8
+# Carlos Rafael Giani, 2007 (dv)
+
+import os, sys, imp, types
+import Utils, Configure, Options
+
+def detect(conf):
+ if getattr(Options.options, 'check_dmd_first', None):
+ test_for_compiler = ['dmd', 'gdc']
+ else:
+ test_for_compiler = ['gdc', 'dmd']
+
+ for d_compiler in test_for_compiler:
+ try:
+ conf.check_tool(d_compiler)
+ except:
+ pass
+ else:
+ break
+ else:
+ conf.fatal('no suitable d compiler was found')
+
+def set_options(opt):
+ d_compiler_opts = opt.add_option_group('D Compiler Options')
+ d_compiler_opts.add_option('--check-dmd-first', action='store_true',
+ help='checks for the gdc compiler before dmd (default is the other way round)',
+ dest='check_dmd_first',
+ default=False)
+
+ for d_compiler in ['gdc', 'dmd']:
+ opt.tool_options('%s' % d_compiler, option_group=d_compiler_opts)
diff --git a/third_party/waf/wafadmin/Tools/config_c.py b/third_party/waf/wafadmin/Tools/config_c.py
new file mode 100644
index 0000000..9f1103c
--- /dev/null
+++ b/third_party/waf/wafadmin/Tools/config_c.py
@@ -0,0 +1,748 @@
+#!/usr/bin/env python
+# encoding: utf-8
+# Thomas Nagy, 2005-2008 (ita)
+
+"""
+c/c++ configuration routines
+"""
+
+import os, imp, sys, shlex, shutil
+from Utils import md5
+import Build, Utils, Configure, Task, Options, Logs, TaskGen
+from Constants import *
+from Configure import conf, conftest
+
+cfg_ver = {
+ 'atleast-version': '>=',
+ 'exact-version': '==',
+ 'max-version': '<=',
+}
+
+SNIP1 = '''
+int main() {
+	void *p;
+	p = (void*)(%s);
+	return 0;
+}
+'''
+
+SNIP2 = '''
+int main() {
+ if ((%(type_name)s *) 0) return 0;
+ if (sizeof (%(type_name)s)) return 0;
+}
+'''
+
+SNIP3 = '''
+int main() {
+ return 0;
+}
+'''
+
+def parse_flags(line, uselib, env):
+ """pkg-config still has bugs on some platforms, and there are many -config programs, parsing flags is necessary :-/"""
+
+ lst = shlex.split(line)
+ while lst:
+ x = lst.pop(0)
+ st = x[:2]
+ ot = x[2:]
+ app = env.append_value
+ if st == '-I' or st == '/I':
+ if not ot: ot = lst.pop(0)
+ app('CPPPATH_' + uselib, ot)
+ elif st == '-D':
+ if not ot: ot = lst.pop(0)
+ app('CXXDEFINES_' + uselib, ot)
+ app('CCDEFINES_' + uselib, ot)
+ elif st == '-l':
+ if not ot: ot = lst.pop(0)
+ app('LIB_' + uselib, ot)
+ elif st == '-L':
+ if not ot: ot = lst.pop(0)
+ app('LIBPATH_' + uselib, ot)
+ elif x == '-pthread' or x.startswith('+'):
+ app('CCFLAGS_' + uselib, x)
+ app('CXXFLAGS_' + uselib, x)
+ app('LINKFLAGS_' + uselib, x)
+ elif x == '-framework':
+ app('FRAMEWORK_' + uselib, lst.pop(0))
+ elif x.startswith('-F'):
+ app('FRAMEWORKPATH_' + uselib, x[2:])
+ elif x.startswith('-std'):
+ app('CCFLAGS_' + uselib, x)
+ app('CXXFLAGS_' + uselib, x)
+ app('LINKFLAGS_' + uselib, x)
+ #
+ # NOTE on special treatment of -Wl,-R and -Wl,-rpath:
+ #
+ # It is important to not put a library provided RPATH
+ # into the LINKFLAGS but in the RPATH instead, since
+ # the provided LINKFLAGS get prepended to our own internal
+ # RPATH later, and hence can potentially lead to linking
+ # in too old versions of our internal libs.
+ #
+ elif x.startswith('-Wl,-R'):
+ app('RPATH_' + uselib, x[6:])
+ elif x.startswith('-Wl,-rpath,'):
+ app('RPATH_' + uselib, x[11:])
+ elif x.startswith('-Wl'):
+ app('LINKFLAGS_' + uselib, x)
+ elif x.startswith('-m') or x.startswith('-f'):
+ app('CCFLAGS_' + uselib, x)
+ app('CXXFLAGS_' + uselib, x)
+
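+# Example: parse_flags('-I/usr/include/foo -L/opt/lib -lfoo', 'FOO', conf.env)
+# yields env['CPPPATH_FOO'] = ['/usr/include/foo'], env['LIBPATH_FOO'] =
+# ['/opt/lib'] and env['LIB_FOO'] = ['foo'], ready for use with uselib='FOO'
+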
+@conf
+def ret_msg(self, f, kw):
+ """execute a function, when provided"""
+ if isinstance(f, str):
+ return f
+ return f(kw)
+
+@conf
+def validate_cfg(self, kw):
+ if not 'path' in kw:
+ kw['path'] = 'pkg-config --errors-to-stdout --print-errors'
+
+ # pkg-config version
+ if 'atleast_pkgconfig_version' in kw:
+ if not 'msg' in kw:
+ kw['msg'] = 'Checking for pkg-config version >= %s' % kw['atleast_pkgconfig_version']
+ return
+
+ # pkg-config --modversion
+ if 'modversion' in kw:
+ return
+
+ if 'variables' in kw:
+ if not 'msg' in kw:
+ kw['msg'] = 'Checking for %s variables' % kw['package']
+ return
+
+ # checking for the version of a module, for the moment, one thing at a time
+ for x in cfg_ver.keys():
+ y = x.replace('-', '_')
+ if y in kw:
+ if not 'package' in kw:
+ raise ValueError('%s requires a package' % x)
+
+ if not 'msg' in kw:
+ kw['msg'] = 'Checking for %s %s %s' % (kw['package'], cfg_ver[x], kw[y])
+ return
+
+ if not 'msg' in kw:
+ kw['msg'] = 'Checking for %s' % (kw['package'] or kw['path'])
+ if not 'okmsg' in kw:
+ kw['okmsg'] = 'yes'
+ if not 'errmsg' in kw:
+ kw['errmsg'] = 'not found'
+
+@conf
+def cmd_and_log(self, cmd, kw):
+ Logs.debug('runner: %s\n' % cmd)
+ if self.log:
+ self.log.write('%s\n' % cmd)
+
+ try:
+ p = Utils.pproc.Popen(cmd, stdout=Utils.pproc.PIPE, stderr=Utils.pproc.PIPE, shell=True)
+ (out, err) = p.communicate()
+ except OSError, e:
+ self.log.write('error %r' % e)
+ self.fatal(str(e))
+
+ # placeholder, don't touch
+ out = str(out)
+ err = str(err)
+
+ if self.log:
+ self.log.write(out)
+ self.log.write(err)
+
+ if p.returncode:
+ if not kw.get('errmsg', ''):
+ if kw.get('mandatory', False):
+ kw['errmsg'] = out.strip()
+ else:
+ kw['errmsg'] = 'no'
+ self.fatal('fail')
+ return out
+
+@conf
+def exec_cfg(self, kw):
+
+ # pkg-config version
+ if 'atleast_pkgconfig_version' in kw:
+ cmd = '%s --atleast-pkgconfig-version=%s' % (kw['path'], kw['atleast_pkgconfig_version'])
+ self.cmd_and_log(cmd, kw)
+ if not 'okmsg' in kw:
+ kw['okmsg'] = 'yes'
+ return
+
+ # checking for the version of a module
+ for x in cfg_ver:
+ y = x.replace('-', '_')
+ if y in kw:
+ self.cmd_and_log('%s --%s=%s %s' % (kw['path'], x, kw[y], kw['package']), kw)
+ if not 'okmsg' in kw:
+ kw['okmsg'] = 'yes'
+ self.define(self.have_define(kw.get('uselib_store', kw['package'])), 1, 0)
+ break
+
+ # retrieving the version of a module
+ if 'modversion' in kw:
+ version = self.cmd_and_log('%s --modversion %s' % (kw['path'], kw['modversion']), kw).strip()
+ self.define('%s_VERSION' % Utils.quote_define_name(kw.get('uselib_store', kw['modversion'])), version)
+ return version
+
+ # retrieving variables of a module
+ if 'variables' in kw:
+ env = kw.get('env', self.env)
+ uselib = kw.get('uselib_store', kw['package'].upper())
+ vars = Utils.to_list(kw['variables'])
+ for v in vars:
+ val = self.cmd_and_log('%s --variable=%s %s' % (kw['path'], v, kw['package']), kw).strip()
+ var = '%s_%s' % (uselib, v)
+ env[var] = val
+ if not 'okmsg' in kw:
+ kw['okmsg'] = 'yes'
+ return
+
+ lst = [kw['path']]
+
+ defi = kw.get('define_variable', None)
+ if not defi:
+ defi = self.env.PKG_CONFIG_DEFINES or {}
+ for key, val in defi.iteritems():
+ lst.append('--define-variable=%s=%s' % (key, val))
+
+ lst.append(kw.get('args', ''))
+ lst.append(kw['package'])
+
+ # so we assume the command-line will output flags to be parsed afterwards
+ cmd = ' '.join(lst)
+ ret = self.cmd_and_log(cmd, kw)
+ if not 'okmsg' in kw:
+ kw['okmsg'] = 'yes'
+
+ self.define(self.have_define(kw.get('uselib_store', kw['package'])), 1, 0)
+ parse_flags(ret, kw.get('uselib_store', kw['package'].upper()), kw.get('env', self.env))
+ return ret
+
+@conf
+def check_cfg(self, *k, **kw):
+ """
+ for pkg-config mostly, but also all the -config tools
+ conf.check_cfg(path='mpicc', args='--showme:compile --showme:link', package='', uselib_store='OPEN_MPI')
+ conf.check_cfg(package='dbus-1', variables='system_bus_default_address session_bus_services_dir')
+ """
+
+ self.validate_cfg(kw)
+ if 'msg' in kw:
+ self.check_message_1(kw['msg'])
+ ret = None
+ try:
+ ret = self.exec_cfg(kw)
+ except Configure.ConfigurationError, e:
+ if 'errmsg' in kw:
+ self.check_message_2(kw['errmsg'], 'YELLOW')
+ if 'mandatory' in kw and kw['mandatory']:
+ if Logs.verbose > 1:
+ raise
+ else:
+ self.fatal('the configuration failed (see %r)' % self.log.name)
+ else:
+ kw['success'] = ret
+ if 'okmsg' in kw:
+ self.check_message_2(self.ret_msg(kw['okmsg'], kw))
+
+ return ret
+
+# the idea is the following: now that we are certain
+# that all the code here is only for c or c++, it is
+# easy to put all the logic in one function
+#
+# this should prevent code duplication (ita)
+
+# env: an optional environment (modified -> provide a copy)
+# compiler: cc or cxx - it tries to guess what is best
+# type: cprogram, cshlib, cstaticlib
+# code: a c code to execute
+# uselib_store: where to add the variables
+# uselib: parameters to use for building
+# define: define to set, like FOO in #define FOO, if not set, add /* #undef FOO */
+# execute: True or False - will return the result of the execution
+
+@conf
+def validate_c(self, kw):
+ """validate the parameters for the test method"""
+
+ if not 'env' in kw:
+ kw['env'] = self.env.copy()
+
+ env = kw['env']
+ if not 'compiler' in kw:
+ kw['compiler'] = 'cc'
+ if env['CXX_NAME'] and Task.TaskBase.classes.get('cxx', None):
+ kw['compiler'] = 'cxx'
+ if not self.env['CXX']:
+ self.fatal('a c++ compiler is required')
+ else:
+ if not self.env['CC']:
+ self.fatal('a c compiler is required')
+
+ if not 'type' in kw:
+ kw['type'] = 'cprogram'
+
+ assert not(kw['type'] != 'cprogram' and kw.get('execute', 0)), 'can only execute programs'
+
+
+ #if kw['type'] != 'program' and kw.get('execute', 0):
+ # raise ValueError, 'can only execute programs'
+
+ def to_header(dct):
+ if 'header_name' in dct:
+ dct = Utils.to_list(dct['header_name'])
+ return ''.join(['#include <%s>\n' % x for x in dct])
+ return ''
+
+ # set the file name
+ if not 'compile_mode' in kw:
+ kw['compile_mode'] = (kw['compiler'] == 'cxx') and 'cxx' or 'cc'
+
+ if not 'compile_filename' in kw:
+ kw['compile_filename'] = 'test.c' + ((kw['compile_mode'] == 'cxx') and 'pp' or '')
+
+ #OSX
+ if 'framework_name' in kw:
+ try: TaskGen.task_gen.create_task_macapp
+ except AttributeError: self.fatal('frameworks require the osx tool')
+
+ fwkname = kw['framework_name']
+ if not 'uselib_store' in kw:
+ kw['uselib_store'] = fwkname.upper()
+
+ if not kw.get('no_header', False):
+ if not 'header_name' in kw:
+ kw['header_name'] = []
+ fwk = '%s/%s.h' % (fwkname, fwkname)
+ if kw.get('remove_dot_h', None):
+ fwk = fwk[:-2]
+ kw['header_name'] = Utils.to_list(kw['header_name']) + [fwk]
+
+ kw['msg'] = 'Checking for framework %s' % fwkname
+ kw['framework'] = fwkname
+ #kw['frameworkpath'] = set it yourself
+
+ if 'function_name' in kw:
+ fu = kw['function_name']
+ if not 'msg' in kw:
+ kw['msg'] = 'Checking for function %s' % fu
+ kw['code'] = to_header(kw) + SNIP1 % fu
+ if not 'uselib_store' in kw:
+ kw['uselib_store'] = fu.upper()
+ if not 'define_name' in kw:
+ kw['define_name'] = self.have_define(fu)
+
+ elif 'type_name' in kw:
+ tu = kw['type_name']
+ if not 'msg' in kw:
+ kw['msg'] = 'Checking for type %s' % tu
+ if not 'header_name' in kw:
+ kw['header_name'] = 'stdint.h'
+ kw['code'] = to_header(kw) + SNIP2 % {'type_name' : tu}
+ if not 'define_name' in kw:
+ kw['define_name'] = self.have_define(tu.upper())
+
+ elif 'header_name' in kw:
+ if not 'msg' in kw:
+ kw['msg'] = 'Checking for header %s' % kw['header_name']
+
+ l = Utils.to_list(kw['header_name'])
+ assert len(l)>0, 'list of headers in header_name is empty'
+
+ kw['code'] = to_header(kw) + SNIP3
+
+ if not 'uselib_store' in kw:
+ kw['uselib_store'] = l[0].upper()
+
+ if not 'define_name' in kw:
+ kw['define_name'] = self.have_define(l[0])
+
+ if 'lib' in kw:
+ if not 'msg' in kw:
+ kw['msg'] = 'Checking for library %s' % kw['lib']
+ if not 'uselib_store' in kw:
+ kw['uselib_store'] = kw['lib'].upper()
+
+ if 'staticlib' in kw:
+ if not 'msg' in kw:
+ kw['msg'] = 'Checking for static library %s' % kw['staticlib']
+ if not 'uselib_store' in kw:
+ kw['uselib_store'] = kw['staticlib'].upper()
+
+ if 'fragment' in kw:
+ # an additional code fragment may be provided to replace the predefined code
+ # in custom headers
+ kw['code'] = kw['fragment']
+ if not 'msg' in kw:
+ kw['msg'] = 'Checking for custom code'
+ if not 'errmsg' in kw:
+ kw['errmsg'] = 'no'
+
+ for (flagsname,flagstype) in [('cxxflags','compiler'), ('cflags','compiler'), ('linkflags','linker')]:
+ if flagsname in kw:
+ if not 'msg' in kw:
+ kw['msg'] = 'Checking for %s flags %s' % (flagstype, kw[flagsname])
+ if not 'errmsg' in kw:
+ kw['errmsg'] = 'no'
+
+ if not 'execute' in kw:
+ kw['execute'] = False
+
+ if not 'errmsg' in kw:
+ kw['errmsg'] = 'not found'
+
+ if not 'okmsg' in kw:
+ kw['okmsg'] = 'yes'
+
+ if not 'code' in kw:
+ kw['code'] = SNIP3
+
+ if not kw.get('success'): kw['success'] = None
+
+ assert 'msg' in kw, 'invalid parameters, read http://freehackers.org/~tnagy/wafbook/single.html#config_helpers_c'
+
+@conf
+def post_check(self, *k, **kw):
+ "set the variables after a test was run successfully"
+
+ is_success = False
+ if kw['execute']:
+ if kw['success'] is not None:
+ is_success = True
+ else:
+ is_success = (kw['success'] == 0)
+
+ if 'define_name' in kw:
+ if 'header_name' in kw or 'function_name' in kw or 'type_name' in kw or 'fragment' in kw:
+ if kw['execute']:
+ key = kw['success']
+ if isinstance(key, str):
+ if key:
+ self.define(kw['define_name'], key, quote=kw.get('quote', 1))
+ else:
+ self.define_cond(kw['define_name'], True)
+ else:
+ self.define_cond(kw['define_name'], False)
+ else:
+ self.define_cond(kw['define_name'], is_success)
+
+ if is_success and 'uselib_store' in kw:
+ import cc, cxx
+ for k in set(cc.g_cc_flag_vars).union(cxx.g_cxx_flag_vars):
+ lk = k.lower()
+ # inconsistency: includes -> CPPPATH
+ if k == 'CPPPATH': lk = 'includes'
+ if k == 'CXXDEFINES': lk = 'defines'
+ if k == 'CCDEFINES': lk = 'defines'
+ if lk in kw:
+ val = kw[lk]
+ # remove trailing slash
+ if isinstance(val, str):
+ val = val.rstrip(os.path.sep)
+ self.env.append_unique(k + '_' + kw['uselib_store'], val)
+
+@conf
+def check(self, *k, **kw):
+ # so this will be the generic function
+ # it will be safer to use check_cxx or check_cc
+ self.validate_c(kw)
+ self.check_message_1(kw['msg'])
+ ret = None
+ try:
+ ret = self.run_c_code(*k, **kw)
+ except Configure.ConfigurationError, e:
+ self.check_message_2(kw['errmsg'], 'YELLOW')
+ if 'mandatory' in kw and kw['mandatory']:
+ if Logs.verbose > 1:
+ raise
+ else:
+ self.fatal('the configuration failed (see %r)' % self.log.name)
+ else:
+ kw['success'] = ret
+ self.check_message_2(self.ret_msg(kw['okmsg'], kw))
+
+ self.post_check(*k, **kw)
+ if not kw.get('execute', False):
+ return ret == 0
+ return ret
+
+@conf
+def run_c_code(self, *k, **kw):
+ test_f_name = kw['compile_filename']
+
+ k = 0
+ while k < 10000:
+ # make certain to use a fresh folder - necessary for win32
+ dir = os.path.join(self.blddir, '.conf_check_%d' % k)
+
+ # if the folder already exists, remove it
+ try:
+ shutil.rmtree(dir)
+ except OSError:
+ pass
+
+ try:
+ os.stat(dir)
+ except OSError:
+ break
+
+ k += 1
+
+ try:
+ os.makedirs(dir)
+ except:
+ self.fatal('cannot create a configuration test folder %r' % dir)
+
+ try:
+ os.stat(dir)
+ except:
+ self.fatal('cannot use the configuration test folder %r' % dir)
+
+ bdir = os.path.join(dir, 'testbuild')
+
+ if not os.path.exists(bdir):
+ os.makedirs(bdir)
+
+ env = kw['env']
+
+ dest = open(os.path.join(dir, test_f_name), 'w')
+ dest.write(kw['code'])
+ dest.close()
+
+ back = os.path.abspath('.')
+
+ bld = Build.BuildContext()
+ bld.log = self.log
+ bld.all_envs.update(self.all_envs)
+ bld.all_envs['default'] = env
+ bld.lst_variants = bld.all_envs.keys()
+ bld.load_dirs(dir, bdir)
+
+ os.chdir(dir)
+
+ bld.rescan(bld.srcnode)
+
+ if not 'features' in kw:
+ # conf.check(features='cc cprogram pyext', ...)
+ kw['features'] = [kw['compile_mode'], kw['type']] # "cprogram cc"
+
+ o = bld(features=kw['features'], source=test_f_name, target='testprog')
+
+ for k, v in kw.iteritems():
+ setattr(o, k, v)
+
+ self.log.write("==>\n%s\n<==\n" % kw['code'])
+
+ # compile the program
+ try:
+ bld.compile()
+ except Utils.WafError:
+ ret = Utils.ex_stack()
+ else:
+ ret = 0
+
+ # chdir before returning
+ os.chdir(back)
+
+ if ret:
+ self.log.write('command returned %r' % ret)
+ self.fatal(str(ret))
+
+ # if we need to run the program, try to get its result
+ # keep the name of the program to execute
+ if kw['execute']:
+ lastprog = o.link_task.outputs[0].abspath(env)
+
+ args = Utils.to_list(kw.get('exec_args', []))
+ proc = Utils.pproc.Popen([lastprog] + args, stdout=Utils.pproc.PIPE, stderr=Utils.pproc.PIPE)
+ (out, err) = proc.communicate()
+ w = self.log.write
+ w(str(out))
+ w('\n')
+ w(str(err))
+ w('\n')
+ w('returncode %r' % proc.returncode)
+ w('\n')
+ if proc.returncode:
+ self.fatal(Utils.ex_stack())
+ ret = out
+
+ return ret
+
+@conf
+def check_cxx(self, *k, **kw):
+ kw['compiler'] = 'cxx'
+ return self.check(*k, **kw)
+
+@conf
+def check_cc(self, *k, **kw):
+ kw['compiler'] = 'cc'
+ return self.check(*k, **kw)
+
+@conf
+def define(self, define, value, quote=1):
+ """store a single define and its state into an internal list for later
+ writing to a config header file. Value can only be
+ a string or int; other types not supported. String
+ values will appear properly quoted in the generated
+ header file."""
+ assert define and isinstance(define, str)
+
+ # ordered_dict is for writing the configuration header in order
+ tbl = self.env[DEFINES] or Utils.ordered_dict()
+
+ # the user forgot to tell if the value is quoted or not
+ if isinstance(value, str):
+ if quote:
+ tbl[define] = '"%s"' % repr('"'+value)[2:-1].replace('"', '\\"')
+ else:
+ tbl[define] = value
+ elif isinstance(value, int):
+ tbl[define] = value
+ else:
+ raise TypeError('define %r -> %r must be a string or an int' % (define, value))
+
+ # add later to make reconfiguring faster
+ self.env[DEFINES] = tbl
+ self.env[define] = value # <- not certain this is necessary
+
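+# Example (illustrative): conf.define('VERSION', '1.3.5') stores a quoted
+# string, while conf.define('USE_FOO', 1) stores a bare integer; both end
+# up in the header written by write_config_header() below.
+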
+@conf
+def undefine(self, define):
+ """store a single define and its state into an internal list
+ for later writing to a config header file"""
+ assert define and isinstance(define, str)
+
+ tbl = self.env[DEFINES] or Utils.ordered_dict()
+
+ value = UNDEFINED
+ tbl[define] = value
+
+ # add later to make reconfiguring faster
+ self.env[DEFINES] = tbl
+ self.env[define] = value
+
+@conf
+def define_cond(self, name, value):
+ """Conditionally define a name.
+ Formally equivalent to: if value: define(name, 1) else: undefine(name)"""
+ if value:
+ self.define(name, 1)
+ else:
+ self.undefine(name)
+
+@conf
+def is_defined(self, key):
+ defines = self.env[DEFINES]
+ if not defines:
+ return False
+ try:
+ value = defines[key]
+ except KeyError:
+ return False
+ else:
+ return value != UNDEFINED
+
+@conf
+def get_define(self, define):
+ "get the value of a previously stored define"
+ try: return self.env[DEFINES][define]
+ except KeyError: return None
+
+@conf
+def have_define(self, name):
+ "prefix the define with 'HAVE_' and make sure it has valid characters."
+ return self.__dict__.get('HAVE_PAT', 'HAVE_%s') % Utils.quote_define_name(name)
+
+@conf
+def write_config_header(self, configfile='', env='', guard='', top=False):
+ "save the defines into a file"
+ if not configfile: configfile = WAF_CONFIG_H
+ waf_guard = guard or '_%s_WAF' % Utils.quote_define_name(configfile)
+
+ # configfile -> absolute path
+ # there is a good reason to concatenate first and to split afterwards
+ if not env: env = self.env
+ if top:
+ diff = ''
+ else:
+ diff = Utils.diff_path(self.srcdir, self.curdir)
+ full = os.sep.join([self.blddir, env.variant(), diff, configfile])
+ full = os.path.normpath(full)
+ (dir, base) = os.path.split(full)
+
+ try: os.makedirs(dir)
+ except: pass
+
+ dest = open(full, 'w')
+ dest.write('/* Configuration header created by Waf - do not edit */\n')
+ dest.write('#ifndef %s\n#define %s\n\n' % (waf_guard, waf_guard))
+
+ dest.write(self.get_config_header())
+
+ # config files are not removed on "waf clean"
+ env.append_unique(CFG_FILES, os.path.join(diff, configfile))
+
+ dest.write('\n#endif /* %s */\n' % waf_guard)
+ dest.close()
+
+@conf
+def get_config_header(self):
+ """Fill-in the contents of the config header. Override when you need to write your own config header."""
+ config_header = []
+
+ tbl = self.env[DEFINES] or Utils.ordered_dict()
+ for key in tbl.allkeys:
+ value = tbl[key]
+ if value is None:
+ config_header.append('#define %s' % key)
+ elif value is UNDEFINED:
+ config_header.append('/* #undef %s */' % key)
+ else:
+ config_header.append('#define %s %s' % (key, value))
+ return "\n".join(config_header)
+
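+# A header produced by write_config_header() looks roughly like this
+# (illustrative names and values):
+#
+#   /* Configuration header created by Waf - do not edit */
+#   #ifndef _CONFIG_H_WAF
+#   #define _CONFIG_H_WAF
+#
+#   #define VERSION "1.3.5"
+#   /* #undef HAVE_FOO */
+#
+#   #endif /* _CONFIG_H_WAF */
+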
+@conftest
+def find_cpp(conf):
+ v = conf.env
+ cpp = []
+ if v['CPP']: cpp = v['CPP']
+ elif 'CPP' in conf.environ: cpp = conf.environ['CPP']
+ if not cpp: cpp = conf.find_program('cpp', var='CPP')
+ #if not cpp: cpp = v['CC']
+ #if not cpp: cpp = v['CXX']
+ v['CPP'] = cpp
+
+@conftest
+def cc_add_flags(conf):
+ conf.add_os_flags('CFLAGS', 'CCFLAGS')
+ conf.add_os_flags('CPPFLAGS')
+
+@conftest
+def cxx_add_flags(conf):
+ conf.add_os_flags('CXXFLAGS')
+ conf.add_os_flags('CPPFLAGS')
+
+@conftest
+def link_add_flags(conf):
+ conf.add_os_flags('LINKFLAGS')
+ conf.add_os_flags('LDFLAGS', 'LINKFLAGS')
+
+@conftest
+def cc_load_tools(conf):
+ conf.check_tool('cc')
+
+@conftest
+def cxx_load_tools(conf):
+ conf.check_tool('cxx')
diff --git a/third_party/waf/wafadmin/Tools/cs.py b/third_party/waf/wafadmin/Tools/cs.py
new file mode 100644
index 0000000..4c987d2
--- /dev/null
+++ b/third_party/waf/wafadmin/Tools/cs.py
@@ -0,0 +1,67 @@
+#!/usr/bin/env python
+# encoding: utf-8
+# Thomas Nagy, 2006 (ita)
+
+"C# support"
+
+import TaskGen, Utils, Task, Options
+from Logs import error
+from TaskGen import before, after, taskgen, feature
+
+flag_vars = ['FLAGS', 'ASSEMBLIES']
+
+@feature('cs')
+def init_cs(self):
+ Utils.def_attrs(self,
+ flags = '',
+ assemblies = '',
+ resources = '',
+ uselib = '')
+
+@feature('cs')
+@after('init_cs')
+def apply_uselib_cs(self):
+ if not self.uselib:
+ return
+ global flag_vars
+ for var in self.to_list(self.uselib):
+ for v in flag_vars: # the module-level list; nothing sets flag_vars on self
+ val = self.env[v+'_'+var]
+ if val: self.env.append_value(v, val)
+
+@feature('cs')
+@after('apply_uselib_cs')
+@before('apply_core')
+def apply_cs(self):
+ try: self.meths.remove('apply_core')
+ except ValueError: pass
+
+ # process the flags for the assemblies
+ for i in self.to_list(self.assemblies) + self.env['ASSEMBLIES']:
+ self.env.append_unique('_ASSEMBLIES', '/r:'+i)
+
+ # process the flags for the resources
+ for i in self.to_list(self.resources):
+ self.env.append_unique('_RESOURCES', '/resource:'+i)
+
+ # what kind of assembly are we generating?
+ self.env['_TYPE'] = getattr(self, 'type', 'exe')
+
+ # additional flags
+ self.env.append_unique('_FLAGS', self.to_list(self.flags))
+ self.env.append_unique('_FLAGS', self.env.FLAGS)
+
+ # process the sources
+ nodes = [self.path.find_resource(i) for i in self.to_list(self.source)]
+ self.create_task('mcs', nodes, self.path.find_or_declare(self.target))
+
+Task.simple_task_type('mcs', '${MCS} ${SRC} /target:${_TYPE} /out:${TGT} ${_FLAGS} ${_ASSEMBLIES} ${_RESOURCES}', color='YELLOW')
+
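+# Usage sketch (hypothetical wscript; names are illustrative):
+#
+#   def build(bld):
+#       bld(features='cs', source='main.cs', target='hello', type='exe',
+#           resources='app.resources')
+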
+def detect(conf):
+ csc = getattr(Options.options, 'cscbinary', None)
+ if csc:
+ conf.env.MCS = csc
+ conf.find_program(['gmcs', 'mcs'], var='MCS')
+
+def set_options(opt):
+ opt.add_option('--with-csc-binary', type='string', dest='cscbinary')
diff --git a/third_party/waf/wafadmin/Tools/cxx.py b/third_party/waf/wafadmin/Tools/cxx.py
new file mode 100644
index 0000000..184fee3
--- /dev/null
+++ b/third_party/waf/wafadmin/Tools/cxx.py
@@ -0,0 +1,103 @@
+#!/usr/bin/env python
+# encoding: utf-8
+# Thomas Nagy, 2005 (ita)
+
+"Base for c++ programs and libraries"
+
+import TaskGen, Task, Utils
+from Logs import debug
+import ccroot # <- do not remove
+from TaskGen import feature, before, extension, after
+
+g_cxx_flag_vars = [
+'CXXDEPS', 'FRAMEWORK', 'FRAMEWORKPATH',
+'STATICLIB', 'LIB', 'LIBPATH', 'LINKFLAGS', 'RPATH',
+'CXXFLAGS', 'CCFLAGS', 'CPPPATH', 'CPPFLAGS', 'CXXDEFINES']
+"main cpp variables"
+
+EXT_CXX = ['.cpp', '.cc', '.cxx', '.C', '.c++']
+
+g_cxx_type_vars=['CXXFLAGS', 'LINKFLAGS']
+
+# TODO remove in waf 1.6
+class cxx_taskgen(ccroot.ccroot_abstract):
+ pass
+
+@feature('cxx')
+@before('apply_type_vars')
+@after('default_cc')
+def init_cxx(self):
+ if not 'cc' in self.features:
+ self.mappings['.c'] = TaskGen.task_gen.mappings['.cxx']
+
+ self.p_flag_vars = set(self.p_flag_vars).union(g_cxx_flag_vars)
+ self.p_type_vars = set(self.p_type_vars).union(g_cxx_type_vars)
+
+ if not self.env['CXX_NAME']:
+ raise Utils.WafError("At least one compiler (g++, ..) must be selected")
+
+@feature('cxx')
+@after('apply_incpaths')
+def apply_obj_vars_cxx(self):
+ """after apply_incpaths for INC_PATHS"""
+ env = self.env
+ app = env.append_unique
+ cxxpath_st = env['CPPPATH_ST']
+
+ # local flags come first
+ # set the user-defined includes paths
+ for i in env['INC_PATHS']:
+ app('_CXXINCFLAGS', cxxpath_st % i.bldpath(env))
+ app('_CXXINCFLAGS', cxxpath_st % i.srcpath(env))
+
+ # set the library include paths
+ for i in env['CPPPATH']:
+ app('_CXXINCFLAGS', cxxpath_st % i)
+
+@feature('cxx')
+@after('apply_lib_vars')
+def apply_defines_cxx(self):
+ """after uselib is set for CXXDEFINES"""
+ self.defines = getattr(self, 'defines', [])
+ lst = self.to_list(self.defines) + self.to_list(self.env['CXXDEFINES'])
+ milst = []
+
+ # now process the local defines
+ for defi in lst:
+ if not defi in milst:
+ milst.append(defi)
+
+ # CXXDEFINES_USELIB
+ libs = self.to_list(self.uselib)
+ for l in libs:
+ val = self.env['CXXDEFINES_'+l]
+ if val: milst += self.to_list(val)
+
+ self.env['DEFLINES'] = ["%s %s" % (x[0], Utils.trimquotes('='.join(x[1:]))) for x in [y.split('=') for y in milst]]
+ y = self.env['CXXDEFINES_ST']
+ self.env.append_unique('_CXXDEFFLAGS', [y%x for x in milst])
+
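+# Example (illustrative): defines=['FOO=1', 'BAR'] with a CXXDEFINES_ST of
+# '-D%s' produces the _CXXDEFFLAGS entries '-DFOO=1' and '-DBAR'.
+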
+@extension(EXT_CXX)
+def cxx_hook(self, node):
+ # create the c++ compilation task
+ if getattr(self, 'obj_ext', None):
+ obj_ext = self.obj_ext
+ else:
+ obj_ext = '_%d.o' % self.idx
+
+ task = self.create_task('cxx', node, node.change_ext(obj_ext))
+ try:
+ self.compiled_tasks.append(task)
+ except AttributeError:
+ raise Utils.WafError('Have you forgotten to set the feature "cxx" on %s?' % str(self))
+ return task
+
+cxx_str = '${CXX} ${CXXFLAGS} ${CPPFLAGS} ${_CXXINCFLAGS} ${_CXXDEFFLAGS} ${CXX_SRC_F}${SRC} ${CXX_TGT_F}${TGT}'
+cls = Task.simple_task_type('cxx', cxx_str, color='GREEN', ext_out='.o', ext_in='.cxx', shell=False)
+cls.scan = ccroot.scan
+cls.vars.append('CXXDEPS')
+
+link_str = '${LINK_CXX} ${CXXLNK_SRC_F}${SRC} ${CXXLNK_TGT_F}${TGT[0].abspath(env)} ${LINKFLAGS}'
+cls = Task.simple_task_type('cxx_link', link_str, color='YELLOW', ext_in='.o', ext_out='.bin', shell=False)
+cls.maxjobs = 1
+cls.install = Utils.nada
diff --git a/third_party/waf/wafadmin/Tools/d.py b/third_party/waf/wafadmin/Tools/d.py
new file mode 100644
index 0000000..2c2e948
--- /dev/null
+++ b/third_party/waf/wafadmin/Tools/d.py
@@ -0,0 +1,534 @@
+#!/usr/bin/env python
+# encoding: utf-8
+# Carlos Rafael Giani, 2007 (dv)
+# Thomas Nagy, 2007-2008 (ita)
+
+import os, sys, re, optparse
+import ccroot # <- leave this
+import TaskGen, Utils, Task, Configure, Logs, Build
+from Logs import debug, error
+from TaskGen import taskgen, feature, after, before, extension
+from Configure import conftest
+
+EXT_D = ['.d', '.di', '.D']
+D_METHS = ['apply_core', 'apply_vnum', 'apply_objdeps'] # additional d methods
+
+DLIB = """
+version(D_Version2) {
+ import std.stdio;
+ int main() {
+ writefln("phobos2");
+ return 0;
+ }
+} else {
+ version(Tango) {
+ import tango.stdc.stdio;
+ int main() {
+ printf("tango");
+ return 0;
+ }
+ } else {
+ import std.stdio;
+ int main() {
+ writefln("phobos1");
+ return 0;
+ }
+ }
+}
+"""
+
+def filter_comments(filename):
+ txt = Utils.readf(filename)
+ i = 0
+ buf = []
+ max = len(txt)
+ begin = 0
+ while i < max:
+ c = txt[i]
+ if c == '"' or c == "'": # skip a string or character literal
+ buf.append(txt[begin:i])
+ delim = c
+ i += 1
+ while i < max:
+ c = txt[i]
+ if c == delim: break
+ elif c == '\\': # skip the character following backslash
+ i += 1
+ i += 1
+ i += 1
+ begin = i
+ elif c == '/': # try to replace a comment with whitespace
+ buf.append(txt[begin:i])
+ i += 1
+ if i == max: break
+ c = txt[i]
+ if c == '+': # eat nesting /+ +/ comment
+ i += 1
+ nesting = 1
+ c = None
+ while i < max:
+ prev = c
+ c = txt[i]
+ if prev == '/' and c == '+':
+ nesting += 1
+ c = None
+ elif prev == '+' and c == '/':
+ nesting -= 1
+ if nesting == 0: break
+ c = None
+ i += 1
+ elif c == '*': # eat /* */ comment
+ i += 1
+ c = None
+ while i < max:
+ prev = c
+ c = txt[i]
+ if prev == '*' and c == '/': break
+ i += 1
+ elif c == '/': # eat // comment
+ i += 1
+ while i < max and txt[i] != '\n':
+ i += 1
+ else: # no comment
+ begin = i - 1
+ continue
+ i += 1
+ begin = i
+ buf.append(' ')
+ else:
+ i += 1
+ buf.append(txt[begin:])
+ return buf
+
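+# Example (illustrative): running filter_comments over a source containing
+#   int x; /* gone */ // gone too
+# returns the code split into fragments with each comment collapsed to a
+# single space, while string and character literals are kept intact.
+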
+class d_parser(object):
+ def __init__(self, env, incpaths):
+ #self.code = ''
+ #self.module = ''
+ #self.imports = []
+
+ self.allnames = []
+
+ self.re_module = re.compile("module\s+([^;]+)")
+ self.re_import = re.compile("import\s+([^;]+)")
+ self.re_import_bindings = re.compile("([^:]+):(.*)")
+ self.re_import_alias = re.compile("[^=]+=(.+)")
+
+ self.env = env
+
+ self.nodes = []
+ self.names = []
+
+ self.incpaths = incpaths
+
+ def tryfind(self, filename):
+ found = 0
+ for n in self.incpaths:
+ found = n.find_resource(filename.replace('.', '/') + '.d')
+ if found:
+ self.nodes.append(found)
+ self.waiting.append(found)
+ break
+ if not found:
+ if not filename in self.names:
+ self.names.append(filename)
+
+ def get_strings(self, code):
+ #self.imports = []
+ self.module = ''
+ lst = []
+
+ # get the module name (if present)
+
+ mod_name = self.re_module.search(code)
+ if mod_name:
+ self.module = re.sub('\s+', '', mod_name.group(1)) # strip all whitespaces
+
+ # go through the code, have a look at all import occurrences
+
+ # first, lets look at anything beginning with "import" and ending with ";"
+ import_iterator = self.re_import.finditer(code)
+ if import_iterator:
+ for import_match in import_iterator:
+ import_match_str = re.sub('\s+', '', import_match.group(1)) # strip all whitespaces
+
+ # does this end with an import bindings declaration?
+ # (import bindings always terminate the list of imports)
+ bindings_match = self.re_import_bindings.match(import_match_str)
+ if bindings_match:
+ import_match_str = bindings_match.group(1)
+ # if so, extract the part before the ":" (since the module declaration(s) is/are located there)
+
+ # split the matching string into a bunch of strings, separated by a comma
+ matches = import_match_str.split(',')
+
+ for match in matches:
+ alias_match = self.re_import_alias.match(match)
+ if alias_match:
+ # is this an alias declaration? (alias = module name) if so, extract the module name
+ match = alias_match.group(1)
+
+ lst.append(match)
+ return lst
+
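+# Example (illustrative): for the line
+#   import std.stdio, io = std.c.stdio : printf;
+# get_strings() returns ['std.stdio', 'std.c.stdio'] - the alias name and
+# the binding list after ':' are stripped.
+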
+ def start(self, node):
+ self.waiting = [node]
+ # while the stack is not empty, add the dependencies
+ while self.waiting:
+ nd = self.waiting.pop(0)
+ self.iter(nd)
+
+ def iter(self, node):
+ path = node.abspath(self.env) # obtain the absolute path
+ code = "".join(filter_comments(path)) # read the file and filter the comments
+ names = self.get_strings(code) # obtain the import strings
+ for x in names:
+ # optimization
+ if x in self.allnames: continue
+ self.allnames.append(x)
+
+ # for each name, see if it is like a node or not
+ self.tryfind(x)
+
+def scan(self):
+ "look for .d/.di the .d source need"
+ env = self.env
+ gruik = d_parser(env, env['INC_PATHS'])
+ gruik.start(self.inputs[0])
+
+ if Logs.verbose:
+ debug('deps: nodes found for %s: %s %s' % (str(self.inputs[0]), str(gruik.nodes), str(gruik.names)))
+ #debug("deps found for %s: %s" % (str(node), str(gruik.deps)), 'deps')
+ return (gruik.nodes, gruik.names)
+
+def get_target_name(self):
+ "for d programs and libs"
+ v = self.env
+ tp = 'program'
+ for x in self.features:
+ if x in ['dshlib', 'dstaticlib']:
+ tp = x.lstrip('d')
+ return v['D_%s_PATTERN' % tp] % self.target
+
+d_params = {
+'dflags': '',
+'importpaths':'',
+'libs':'',
+'libpaths':'',
+'generate_headers':False,
+}
+
+@feature('d')
+@before('apply_type_vars')
+def init_d(self):
+ for x in d_params:
+ setattr(self, x, getattr(self, x, d_params[x]))
+
+class d_taskgen(TaskGen.task_gen):
+ def __init__(self, *k, **kw):
+ TaskGen.task_gen.__init__(self, *k, **kw)
+
+ # COMPAT
+ if len(k) > 1:
+ self.features.append('d' + k[1])
+
+# okay, we borrow a few methods from ccroot
+TaskGen.bind_feature('d', D_METHS)
+
+@feature('d')
+@before('apply_d_libs')
+def init_d(self):
+ Utils.def_attrs(self,
+ dflags='',
+ importpaths='',
+ libs='',
+ libpaths='',
+ uselib='',
+ uselib_local='',
+ generate_headers=False, # set to true if you want .di files as well as .o
+ compiled_tasks=[],
+ add_objects=[],
+ link_task=None)
+
+@feature('d')
+@after('apply_d_link', 'init_d')
+@before('apply_vnum', 'apply_d_vars')
+def apply_d_libs(self):
+ """after apply_link because of 'link_task'
+ after default_cc because of the attribute 'uselib'"""
+ env = self.env
+
+ # 1. the case of the libs defined in the project (visit ancestors first)
+ # the ancestors external libraries (uselib) will be prepended
+ self.uselib = self.to_list(self.uselib)
+ names = self.to_list(self.uselib_local)
+
+ seen = set([])
+ tmp = Utils.deque(names) # consume a copy of the list of names
+ while tmp:
+ lib_name = tmp.popleft()
+ # visit dependencies only once
+ if lib_name in seen:
+ continue
+
+ y = self.name_to_obj(lib_name)
+ if not y:
+ raise Utils.WafError('object %r was not found in uselib_local (required by %r)' % (lib_name, self.name))
+ y.post()
+ seen.add(lib_name)
+
+ # object has ancestors to process (shared libraries): add them to the end of the list
+ if getattr(y, 'uselib_local', None):
+ lst = y.to_list(y.uselib_local)
+ if 'dshlib' in y.features or 'dprogram' in y.features:
+ lst = [x for x in lst if not 'dstaticlib' in self.name_to_obj(x).features]
+ tmp.extend(lst)
+
+ # link task and flags
+ if getattr(y, 'link_task', None):
+
+ link_name = y.target[y.target.rfind(os.sep) + 1:]
+ if 'dstaticlib' in y.features or 'dshlib' in y.features:
+ env.append_unique('DLINKFLAGS', env.DLIB_ST % link_name)
+ env.append_unique('DLINKFLAGS', env.DLIBPATH_ST % y.link_task.outputs[0].parent.bldpath(env))
+
+ # the order
+ self.link_task.set_run_after(y.link_task)
+
+ # for the recompilation
+ dep_nodes = getattr(self.link_task, 'dep_nodes', [])
+ self.link_task.dep_nodes = dep_nodes + y.link_task.outputs
+
+ # add ancestors uselib too - but only propagate those that have no staticlib
+ for v in self.to_list(y.uselib):
+ if not v in self.uselib:
+ self.uselib.insert(0, v)
+
+ # if the library task generator provides 'export_incdirs', add to the include path
+ # the export_incdirs must be a list of paths relative to the other library
+ if getattr(y, 'export_incdirs', None):
+ for x in self.to_list(y.export_incdirs):
+ node = y.path.find_dir(x)
+ if not node:
+ raise Utils.WafError('object %r: invalid folder %r in export_incdirs' % (y.target, x))
+ self.env.append_unique('INC_PATHS', node)
+
+@feature('dprogram', 'dshlib', 'dstaticlib')
+@after('apply_core')
+def apply_d_link(self):
+ link = getattr(self, 'link', None)
+ if not link:
+ if 'dstaticlib' in self.features: link = 'static_link'
+ else: link = 'd_link'
+
+ outputs = [t.outputs[0] for t in self.compiled_tasks]
+ self.link_task = self.create_task(link, outputs, self.path.find_or_declare(get_target_name(self)))
+
+@feature('d')
+@after('apply_core')
+def apply_d_vars(self):
+ env = self.env
+ dpath_st = env['DPATH_ST']
+ lib_st = env['DLIB_ST']
+ libpath_st = env['DLIBPATH_ST']
+
+ importpaths = self.to_list(self.importpaths)
+ libpaths = []
+ libs = []
+ uselib = self.to_list(self.uselib)
+
+ for i in uselib:
+ if env['DFLAGS_' + i]:
+ env.append_unique('DFLAGS', env['DFLAGS_' + i])
+
+ for x in self.features:
+ if not x in ['dprogram', 'dstaticlib', 'dshlib']:
+ continue
+ x = x.lstrip('d') # strip the leading 'd' so keys like D_shlib_DFLAGS match
+ d_shlib_dflags = env['D_' + x + '_DFLAGS']
+ if d_shlib_dflags:
+ env.append_unique('DFLAGS', d_shlib_dflags)
+
+ # add import paths
+ for i in uselib:
+ if env['DPATH_' + i]:
+ for entry in self.to_list(env['DPATH_' + i]):
+ if not entry in importpaths:
+ importpaths.append(entry)
+
+ # now process the import paths
+ for path in importpaths:
+ if os.path.isabs(path):
+ env.append_unique('_DIMPORTFLAGS', dpath_st % path)
+ else:
+ node = self.path.find_dir(path)
+ self.env.append_unique('INC_PATHS', node)
+ env.append_unique('_DIMPORTFLAGS', dpath_st % node.srcpath(env))
+ env.append_unique('_DIMPORTFLAGS', dpath_st % node.bldpath(env))
+
+ # add library paths
+ for i in uselib:
+ if env['LIBPATH_' + i]:
+ for entry in self.to_list(env['LIBPATH_' + i]):
+ if not entry in libpaths:
+ libpaths.append(entry)
+ libpaths = self.to_list(self.libpaths) + libpaths
+
+ # now process the library paths
+ # apply same path manipulation as used with import paths
+ for path in libpaths:
+ if not os.path.isabs(path):
+ node = self.path.find_resource(path)
+ if not node:
+ raise Utils.WafError('could not find libpath %r from %r' % (path, self))
+ path = node.abspath(self.env)
+
+ env.append_unique('DLINKFLAGS', libpath_st % path)
+
+ # add libraries
+ for i in uselib:
+ if env['LIB_' + i]:
+ for entry in self.to_list(env['LIB_' + i]):
+ if not entry in libs:
+ libs.append(entry)
+ libs.extend(self.to_list(self.libs))
+
+ # process user flags
+ for flag in self.to_list(self.dflags):
+ env.append_unique('DFLAGS', flag)
+
+ # now process the libraries
+ for lib in libs:
+ env.append_unique('DLINKFLAGS', lib_st % lib)
+
+ # add linker flags
+ for i in uselib:
+ dlinkflags = env['DLINKFLAGS_' + i]
+ if dlinkflags:
+ for linkflag in dlinkflags:
+ env.append_unique('DLINKFLAGS', linkflag)
+
+@feature('dshlib')
+@after('apply_d_vars')
+def add_shlib_d_flags(self):
+ for linkflag in self.env['D_shlib_LINKFLAGS']:
+ self.env.append_unique('DLINKFLAGS', linkflag)
+
+@extension(EXT_D)
+def d_hook(self, node):
+ # create the compilation task: d or d_with_header
+ task = self.create_task(self.generate_headers and 'd_with_header' or 'd')
+ try: obj_ext = self.obj_ext
+ except AttributeError: obj_ext = '_%d.o' % self.idx
+
+ task.inputs = [node]
+ task.outputs = [node.change_ext(obj_ext)]
+ self.compiled_tasks.append(task)
+
+ if self.generate_headers:
+ header_node = node.change_ext(self.env['DHEADER_ext'])
+ task.outputs += [header_node]
+
+d_str = '${D_COMPILER} ${DFLAGS} ${_DIMPORTFLAGS} ${D_SRC_F}${SRC} ${D_TGT_F}${TGT}'
+d_with_header_str = '${D_COMPILER} ${DFLAGS} ${_DIMPORTFLAGS} \
+${D_HDR_F}${TGT[1].bldpath(env)} \
+${D_SRC_F}${SRC} \
+${D_TGT_F}${TGT[0].bldpath(env)}'
+link_str = '${D_LINKER} ${DLNK_SRC_F}${SRC} ${DLNK_TGT_F}${TGT} ${DLINKFLAGS}'
+
+def override_exec(cls):
+ """stupid dmd wants -of stuck to the file name"""
+ old_exec = cls.exec_command
+ def exec_command(self, *k, **kw):
+ if isinstance(k[0], list):
+ lst = k[0]
+ for i in xrange(len(lst)):
+ if lst[i] == '-of':
+ del lst[i]
+ lst[i] = '-of' + lst[i]
+ break
+ return old_exec(self, *k, **kw)
+ cls.exec_command = exec_command
+
+cls = Task.simple_task_type('d', d_str, 'GREEN', before='static_link d_link', shell=False)
+cls.scan = scan
+override_exec(cls)
+
+cls = Task.simple_task_type('d_with_header', d_with_header_str, 'GREEN', before='static_link d_link', shell=False)
+override_exec(cls)
+
+cls = Task.simple_task_type('d_link', link_str, color='YELLOW', shell=False)
+override_exec(cls)
+
+# for feature request #104
+@taskgen
+def generate_header(self, filename, install_path):
+ if not hasattr(self, 'header_lst'): self.header_lst = []
+ self.meths.append('process_header')
+ self.header_lst.append([filename, install_path])
+
+@before('apply_core')
+def process_header(self):
+ env = self.env
+ for i in getattr(self, 'header_lst', []):
+ node = self.path.find_resource(i[0])
+
+ if not node:
+ raise Utils.WafError('file not found on d obj '+i[0])
+
+ task = self.create_task('d_header')
+ task.set_inputs(node)
+ task.set_outputs(node.change_ext('.di'))
+
+d_header_str = '${D_COMPILER} ${D_HEADER} ${SRC}'
+Task.simple_task_type('d_header', d_header_str, color='BLUE', shell=False)
+
+@conftest
+def d_platform_flags(conf):
+ v = conf.env
+ binfmt = v.DEST_BINFMT or Utils.unversioned_sys_platform_to_binary_format(
+ v.DEST_OS or Utils.unversioned_sys_platform())
+ if binfmt == 'pe':
+ v['D_program_PATTERN'] = '%s.exe'
+ v['D_shlib_PATTERN'] = 'lib%s.dll'
+ v['D_staticlib_PATTERN'] = 'lib%s.a'
+ else:
+ v['D_program_PATTERN'] = '%s'
+ v['D_shlib_PATTERN'] = 'lib%s.so'
+ v['D_staticlib_PATTERN'] = 'lib%s.a'
+
+@conftest
+def check_dlibrary(conf):
+ ret = conf.check_cc(features='d dprogram', fragment=DLIB, mandatory=True, compile_filename='test.d', execute=True)
+ conf.env.DLIBRARY = ret.strip()
+
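+# The DLIB fragment above prints "phobos2", "tango" or "phobos1" when
+# executed, so conf.env.DLIBRARY ends up naming the D standard library.
+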
+# quick test #
+if __name__ == "__main__":
+ #Logs.verbose = 2
+
+ try: arg = sys.argv[1]
+ except IndexError: arg = "file.d"
+
+ print("".join(filter_comments(arg)))
+ # TODO
+ paths = ['.']
+
+ #gruik = filter()
+ #gruik.start(arg)
+
+ #code = "".join(gruik.buf)
+
+ #print "we have found the following code"
+ #print code
+
+ #print "now parsing"
+ #print "-------------------------------------------"
+ """
+ parser_ = d_parser()
+ parser_.start(arg)
+
+ print "module: %s" % parser_.module
+ print "imports: ",
+ for imp in parser_.imports:
+ print imp + " ",
+ print
+"""
diff --git a/third_party/waf/wafadmin/Tools/dbus.py b/third_party/waf/wafadmin/Tools/dbus.py
new file mode 100644
index 0000000..42c4ca2
--- /dev/null
+++ b/third_party/waf/wafadmin/Tools/dbus.py
@@ -0,0 +1,33 @@
+#!/usr/bin/env python
+# encoding: utf-8
+# Ali Sabil, 2007
+
+import Task, Utils
+from TaskGen import taskgen, before, after, feature
+
+@taskgen
+def add_dbus_file(self, filename, prefix, mode):
+ if not hasattr(self, 'dbus_lst'):
+ self.dbus_lst = []
+ self.meths.append('process_dbus')
+ self.dbus_lst.append([filename, prefix, mode])
+
+@before('apply_core')
+def process_dbus(self):
+ for filename, prefix, mode in getattr(self, 'dbus_lst', []):
+ node = self.path.find_resource(filename)
+
+ if not node:
+ raise Utils.WafError('file not found ' + filename)
+
+ tsk = self.create_task('dbus_binding_tool', node, node.change_ext('.h'))
+
+ tsk.env.DBUS_BINDING_TOOL_PREFIX = prefix
+ tsk.env.DBUS_BINDING_TOOL_MODE = mode
+
+Task.simple_task_type('dbus_binding_tool',
+ '${DBUS_BINDING_TOOL} --prefix=${DBUS_BINDING_TOOL_PREFIX} --mode=${DBUS_BINDING_TOOL_MODE} --output=${TGT} ${SRC}',
+ color='BLUE', before='cc')
+
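+# Usage sketch (hypothetical wscript; names are illustrative):
+#
+#   obj = bld(features='cc cprogram', source='main.c', target='app')
+#   obj.add_dbus_file('service.xml', 'service_prefix', 'glib-server')
+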
+def detect(conf):
+ dbus_binding_tool = conf.find_program('dbus-binding-tool', var='DBUS_BINDING_TOOL')
diff --git a/third_party/waf/wafadmin/Tools/dmd.py b/third_party/waf/wafadmin/Tools/dmd.py
new file mode 100644
index 0000000..b86ffd6
--- /dev/null
+++ b/third_party/waf/wafadmin/Tools/dmd.py
@@ -0,0 +1,63 @@
+#!/usr/bin/env python
+# encoding: utf-8
+# Carlos Rafael Giani, 2007 (dv)
+# Thomas Nagy, 2008 (ita)
+
+import sys
+import Utils, ar
+from Configure import conftest
+
+@conftest
+def find_dmd(conf):
+ conf.find_program(['dmd', 'ldc'], var='D_COMPILER', mandatory=True)
+
+@conftest
+def common_flags_ldc(conf):
+ v = conf.env
+ v['DFLAGS'] = ['-d-version=Posix']
+ v['DLINKFLAGS'] = []
+ v['D_shlib_DFLAGS'] = ['-relocation-model=pic']
+
+@conftest
+def common_flags_dmd(conf):
+ v = conf.env
+
+ # _DFLAGS _DIMPORTFLAGS
+
+ # the compiler is dmd, so gdc-specific handling does not apply; just
+ # ensure the key exists so a wscript can append flags to it
+ v['DFLAGS'] = ['-version=Posix']
+
+ v['D_SRC_F'] = ''
+ v['D_TGT_F'] = ['-c', '-of']
+ v['DPATH_ST'] = '-I%s' # template for adding import paths
+
+ # linker
+ v['D_LINKER'] = v['D_COMPILER']
+ v['DLNK_SRC_F'] = ''
+ v['DLNK_TGT_F'] = '-of'
+
+ v['DLIB_ST'] = '-L-l%s' # template for adding libs
+ v['DLIBPATH_ST'] = '-L-L%s' # template for adding libpaths
+
+ # linker debug levels
+ v['DFLAGS_OPTIMIZED'] = ['-O']
+ v['DFLAGS_DEBUG'] = ['-g', '-debug']
+ v['DFLAGS_ULTRADEBUG'] = ['-g', '-debug']
+ v['DLINKFLAGS'] = ['-quiet']
+
+ v['D_shlib_DFLAGS'] = ['-fPIC']
+ v['D_shlib_LINKFLAGS'] = ['-L-shared']
+
+ v['DHEADER_ext'] = '.di'
+ v['D_HDR_F'] = ['-H', '-Hf']
+
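+# With the templates above a compile command expands roughly to
+# (illustrative): dmd <DFLAGS> -I<importpath> file.d -c -offile_1.o
+# override_exec() in d.py glues '-of' onto the output file name for dmd.
+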
+def detect(conf):
+ conf.find_dmd()
+ conf.check_tool('ar')
+ conf.check_tool('d')
+ conf.common_flags_dmd()
+ conf.d_platform_flags()
+
+ if conf.env.D_COMPILER.find('ldc') > -1:
+ conf.common_flags_ldc()
diff --git a/third_party/waf/wafadmin/Tools/flex.py b/third_party/waf/wafadmin/Tools/flex.py
new file mode 100644
index 0000000..cbea42d
--- /dev/null
+++ b/third_party/waf/wafadmin/Tools/flex.py
@@ -0,0 +1,24 @@
+#!/usr/bin/env python
+# encoding: utf-8
+# John O'Meara, 2006
+# Thomas Nagy, 2006-2008
+
+"Flex processing"
+
+import TaskGen
+
+def decide_ext(self, node):
+ if 'cxx' in self.features: return '.lex.cc'
+ else: return '.lex.c'
+
+TaskGen.declare_chain(
+ name = 'flex',
+ rule = '${FLEX} -o${TGT} ${FLEXFLAGS} ${SRC}',
+ ext_in = '.l',
+ ext_out = '.c .cxx',
+ decider = decide_ext
+)
+
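+# Example (illustrative): a 'scanner.l' source listed in a cc task
+# generator is turned into scanner.lex.c by this chain (scanner.lex.cc
+# when 'cxx' is among the features).
+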
+def detect(conf):
+ conf.find_program('flex', var='FLEX', mandatory=True)
+ conf.env['FLEXFLAGS'] = ''
diff --git a/third_party/waf/wafadmin/Tools/gas.py b/third_party/waf/wafadmin/Tools/gas.py
new file mode 100644
index 0000000..5dd0b5d
--- /dev/null
+++ b/third_party/waf/wafadmin/Tools/gas.py
@@ -0,0 +1,37 @@
+#!/usr/bin/env python
+# encoding: utf-8
+# Thomas Nagy, 2008 (ita)
+
+"as and gas"
+
+import os, sys
+import Task
+from TaskGen import extension, taskgen, after, before
+
+EXT_ASM = ['.s', '.S', '.asm', '.ASM', '.spp', '.SPP']
+
+as_str = '${AS} ${ASFLAGS} ${_ASINCFLAGS} ${SRC} -o ${TGT}'
+Task.simple_task_type('asm', as_str, 'PINK', ext_out='.o', shell=False)
+
+@extension(EXT_ASM)
+def asm_hook(self, node):
+ # create the assembler task
+ try: obj_ext = self.obj_ext
+ except AttributeError: obj_ext = '_%d.o' % self.idx
+
+ task = self.create_task('asm', node, node.change_ext(obj_ext))
+ self.compiled_tasks.append(task)
+ self.meths.append('asm_incflags')
+
+@after('apply_obj_vars_cc')
+@after('apply_obj_vars_cxx')
+@before('apply_link')
+def asm_incflags(self):
+ self.env.append_value('_ASINCFLAGS', self.env.ASINCFLAGS)
+ var = ('cxx' in self.features) and 'CXX' or 'CC'
+ self.env.append_value('_ASINCFLAGS', self.env['_%sINCFLAGS' % var])
+
+def detect(conf):
+ conf.find_program(['gas', 'as'], var='AS')
+ if not conf.env.AS: conf.env.AS = conf.env.CC
+ #conf.env.ASFLAGS = ['-c'] <- may be necessary for .S files
diff --git a/third_party/waf/wafadmin/Tools/gcc.py b/third_party/waf/wafadmin/Tools/gcc.py
new file mode 100644
index 0000000..83d5b24
--- /dev/null
+++ b/third_party/waf/wafadmin/Tools/gcc.py
@@ -0,0 +1,138 @@
+#!/usr/bin/env python
+# encoding: utf-8
+# Thomas Nagy, 2006-2008 (ita)
+# Ralf Habacker, 2006 (rh)
+# Yinon Ehrlich, 2009
+
+import os, sys
+import Configure, Options, Utils
+import ccroot, ar
+from Configure import conftest
+
+@conftest
+def find_gcc(conf):
+ cc = conf.find_program(['gcc', 'cc'], var='CC', mandatory=True)
+ cc = conf.cmd_to_list(cc)
+ ccroot.get_cc_version(conf, cc, gcc=True)
+ conf.env.CC_NAME = 'gcc'
+ conf.env.CC = cc
+
+@conftest
+def gcc_common_flags(conf):
+ v = conf.env
+
+ # CPPFLAGS CCDEFINES _CCINCFLAGS _CCDEFFLAGS
+
+ v['CCFLAGS_DEBUG'] = ['-g']
+
+ v['CCFLAGS_RELEASE'] = ['-O2']
+
+ v['CC_SRC_F'] = ''
+ v['CC_TGT_F'] = ['-c', '-o', ''] # shell hack for -MD
+ v['CPPPATH_ST'] = '-I%s' # template for adding include paths
+
+ # linker
+ if not v['LINK_CC']: v['LINK_CC'] = v['CC']
+ v['CCLNK_SRC_F'] = ''
+ v['CCLNK_TGT_F'] = ['-o', ''] # shell hack for -MD
+
+ v['LIB_ST'] = '-l%s' # template for adding libs
+ v['LIBPATH_ST'] = '-L%s' # template for adding libpaths
+ v['STATICLIB_ST'] = '-l%s'
+ v['STATICLIBPATH_ST'] = '-L%s'
+ v['RPATH_ST'] = '-Wl,-rpath,%s'
+ v['CCDEFINES_ST'] = '-D%s'
+
+ v['SONAME_ST'] = '-Wl,-h,%s'
+ v['SHLIB_MARKER'] = '-Wl,-Bdynamic'
+ v['STATICLIB_MARKER'] = '-Wl,-Bstatic'
+ v['FULLSTATIC_MARKER'] = '-static'
+
+ # program
+ v['program_PATTERN'] = '%s'
+
+ # shared library
+ v['shlib_CCFLAGS'] = ['-fPIC', '-DPIC'] # avoid using -DPIC, -fPIC already defines the __PIC__ macro
+ v['shlib_LINKFLAGS'] = ['-shared']
+ v['shlib_PATTERN'] = 'lib%s.so'
+
+ # static lib
+ v['staticlib_LINKFLAGS'] = ['-Wl,-Bstatic']
+ v['staticlib_PATTERN'] = 'lib%s.a'
+
+ # osx stuff
+ v['LINKFLAGS_MACBUNDLE'] = ['-bundle', '-undefined', 'dynamic_lookup']
+ v['CCFLAGS_MACBUNDLE'] = ['-fPIC']
+ v['macbundle_PATTERN'] = '%s.bundle'
+
+@conftest
+def gcc_modifier_win32(conf):
+ v = conf.env
+ v['program_PATTERN'] = '%s.exe'
+
+ v['shlib_PATTERN'] = '%s.dll'
+ v['implib_PATTERN'] = 'lib%s.dll.a'
+ v['IMPLIB_ST'] = '-Wl,--out-implib,%s'
+
+ dest_arch = v['DEST_CPU']
+ v['shlib_CCFLAGS'] = ['-DPIC']
+
+ v.append_value('shlib_CCFLAGS', '-DDLL_EXPORT') # TODO adding nonstandard defines like this DLL_EXPORT is not a good idea
+
+ # Auto-import is enabled by default even without this option,
+ # but enabling it explicitly has the nice effect of suppressing the rather boring, debug-level messages
+ # that the linker emits otherwise.
+ v.append_value('LINKFLAGS', '-Wl,--enable-auto-import')
+
+@conftest
+def gcc_modifier_cygwin(conf):
+ gcc_modifier_win32(conf)
+ v = conf.env
+ v['shlib_PATTERN'] = 'cyg%s.dll'
+ v.append_value('shlib_LINKFLAGS', '-Wl,--enable-auto-image-base')
+
+@conftest
+def gcc_modifier_darwin(conf):
+ v = conf.env
+ v['shlib_CCFLAGS'] = ['-fPIC', '-compatibility_version', '1', '-current_version', '1']
+ v['shlib_LINKFLAGS'] = ['-dynamiclib']
+ v['shlib_PATTERN'] = 'lib%s.dylib'
+
+ v['staticlib_LINKFLAGS'] = []
+
+ v['SHLIB_MARKER'] = ''
+ v['STATICLIB_MARKER'] = ''
+ v['SONAME_ST'] = ''
+
+@conftest
+def gcc_modifier_aix(conf):
+ v = conf.env
+ v['program_LINKFLAGS'] = ['-Wl,-brtl']
+
+ v['shlib_LINKFLAGS'] = ['-shared','-Wl,-brtl,-bexpfull']
+
+ v['SHLIB_MARKER'] = ''
+
+@conftest
+def gcc_modifier_openbsd(conf):
+ conf.env['SONAME_ST'] = []
+
+@conftest
+def gcc_modifier_platform(conf):
+ # * set configuration options specific to a platform
+ # * the destination platform is detected automatically by looking at the macros the compiler predefines,
+ # and if it's not recognised, it falls back to sys.platform.
+ dest_os = conf.env['DEST_OS'] or Utils.unversioned_sys_platform()
+ gcc_modifier_func = globals().get('gcc_modifier_' + dest_os)
+ if gcc_modifier_func:
+ gcc_modifier_func(conf)
+
+def detect(conf):
+ conf.find_gcc()
+ conf.find_cpp()
+ conf.find_ar()
+ conf.gcc_common_flags()
+ conf.gcc_modifier_platform()
+ conf.cc_load_tools()
+ conf.cc_add_flags()
+ conf.link_add_flags()
diff --git a/third_party/waf/wafadmin/Tools/gdc.py b/third_party/waf/wafadmin/Tools/gdc.py
new file mode 100644
index 0000000..d1e5e7b
--- /dev/null
+++ b/third_party/waf/wafadmin/Tools/gdc.py
@@ -0,0 +1,51 @@
+#!/usr/bin/env python
+# encoding: utf-8
+# Carlos Rafael Giani, 2007 (dv)
+
+import sys
+import Utils, ar
+from Configure import conftest
+
+@conftest
+def find_gdc(conf):
+ conf.find_program('gdc', var='D_COMPILER', mandatory=True)
+
+@conftest
+def common_flags_gdc(conf):
+ v = conf.env
+
+ # _DFLAGS _DIMPORTFLAGS
+
+ # for more info about the meaning of these variables see dmd.py
+ v['DFLAGS'] = []
+
+ v['D_SRC_F'] = ''
+ v['D_TGT_F'] = ['-c', '-o', '']
+ v['DPATH_ST'] = '-I%s' # template for adding import paths
+
+ # linker
+ v['D_LINKER'] = v['D_COMPILER']
+ v['DLNK_SRC_F'] = ''
+ v['DLNK_TGT_F'] = ['-o', '']
+
+ v['DLIB_ST'] = '-l%s' # template for adding libs
+ v['DLIBPATH_ST'] = '-L%s' # template for adding libpaths
+
+ # debug levels
+ v['DLINKFLAGS'] = []
+ v['DFLAGS_OPTIMIZED'] = ['-O3']
+ v['DFLAGS_DEBUG'] = ['-O0']
+ v['DFLAGS_ULTRADEBUG'] = ['-O0']
+
+ v['D_shlib_DFLAGS'] = []
+ v['D_shlib_LINKFLAGS'] = ['-shared']
+
+ v['DHEADER_ext'] = '.di'
+ v['D_HDR_F'] = '-fintfc -fintfc-file='
+
+def detect(conf):
+ conf.find_gdc()
+ conf.check_tool('ar')
+ conf.check_tool('d')
+ conf.common_flags_gdc()
+ conf.d_platform_flags()
diff --git a/third_party/waf/wafadmin/Tools/glib2.py b/third_party/waf/wafadmin/Tools/glib2.py
new file mode 100644
index 0000000..d3fc776
--- /dev/null
+++ b/third_party/waf/wafadmin/Tools/glib2.py
@@ -0,0 +1,163 @@
+#! /usr/bin/env python
+# encoding: utf-8
+# Thomas Nagy, 2006-2008 (ita)
+
+"GLib2 support"
+
+import Task, Utils
+from TaskGen import taskgen, before, after, feature
+
+#
+# glib-genmarshal
+#
+
+@taskgen
+def add_marshal_file(self, filename, prefix):
+ if not hasattr(self, 'marshal_list'):
+ self.marshal_list = []
+ self.meths.append('process_marshal')
+ self.marshal_list.append((filename, prefix))
+
+@before('apply_core')
+def process_marshal(self):
+ for f, prefix in getattr(self, 'marshal_list', []):
+ node = self.path.find_resource(f)
+
+ if not node:
+ raise Utils.WafError('file not found %r' % f)
+
+ h_node = node.change_ext('.h')
+ c_node = node.change_ext('.c')
+
+ task = self.create_task('glib_genmarshal', node, [h_node, c_node])
+ task.env.GLIB_GENMARSHAL_PREFIX = prefix
+ self.allnodes.append(c_node)
+
+def genmarshal_func(self):
+
+ bld = self.inputs[0].__class__.bld
+
+ get = self.env.get_flat
+ cmd1 = "%s %s --prefix=%s --header > %s" % (
+ get('GLIB_GENMARSHAL'),
+ self.inputs[0].srcpath(self.env),
+ get('GLIB_GENMARSHAL_PREFIX'),
+ self.outputs[0].abspath(self.env)
+ )
+
+ ret = bld.exec_command(cmd1)
+ if ret: return ret
+
+ #print self.outputs[1].abspath(self.env)
+ f = open(self.outputs[1].abspath(self.env), 'wb')
+ c = '''#include "%s"\n''' % self.outputs[0].name
+ f.write(c)
+ f.close()
+
+ cmd2 = "%s %s --prefix=%s --body >> %s" % (
+ get('GLIB_GENMARSHAL'),
+ self.inputs[0].srcpath(self.env),
+ get('GLIB_GENMARSHAL_PREFIX'),
+ self.outputs[1].abspath(self.env)
+ )
+ ret = Utils.exec_command(cmd2)
+ if ret: return ret
+
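+# The two commands built above look like (illustrative paths):
+#   glib-genmarshal marshal.list --prefix=foo --header > marshal.h
+#   glib-genmarshal marshal.list --prefix=foo --body >> marshal.c
+# with an #include of the generated header written into the .c first.
+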
+#
+# glib-mkenums
+#
+
+@taskgen
+def add_enums_from_template(self, source='', target='', template='', comments=''):
+ if not hasattr(self, 'enums_list'):
+ self.enums_list = []
+ self.meths.append('process_enums')
+ self.enums_list.append({'source': source,
+ 'target': target,
+ 'template': template,
+ 'file-head': '',
+ 'file-prod': '',
+ 'file-tail': '',
+ 'enum-prod': '',
+ 'value-head': '',
+ 'value-prod': '',
+ 'value-tail': '',
+ 'comments': comments})
+
+@taskgen
+def add_enums(self, source='', target='',
+ file_head='', file_prod='', file_tail='', enum_prod='',
+ value_head='', value_prod='', value_tail='', comments=''):
+ if not hasattr(self, 'enums_list'):
+ self.enums_list = []
+ self.meths.append('process_enums')
+ self.enums_list.append({'source': source,
+ 'template': '',
+ 'target': target,
+ 'file-head': file_head,
+ 'file-prod': file_prod,
+ 'file-tail': file_tail,
+ 'enum-prod': enum_prod,
+ 'value-head': value_head,
+ 'value-prod': value_prod,
+ 'value-tail': value_tail,
+ 'comments': comments})
+
+@before('apply_core')
+def process_enums(self):
+ for enum in getattr(self, 'enums_list', []):
+ task = self.create_task('glib_mkenums')
+ env = task.env
+
+ inputs = []
+
+ # process the source
+ source_list = self.to_list(enum['source'])
+ if not source_list:
+ raise Utils.WafError('missing source ' + str(enum))
+ source_list = [self.path.find_resource(k) for k in source_list]
+ inputs += source_list
+ env['GLIB_MKENUMS_SOURCE'] = [k.srcpath(env) for k in source_list]
+
+ # find the target
+ if not enum['target']:
+ raise Utils.WafError('missing target ' + str(enum))
+ tgt_node = self.path.find_or_declare(enum['target'])
+ if tgt_node.name.endswith('.c'):
+ self.allnodes.append(tgt_node)
+ env['GLIB_MKENUMS_TARGET'] = tgt_node.abspath(env)
+
+
+ options = []
+
+ if enum['template']: # template, if provided
+ template_node = self.path.find_resource(enum['template'])
+ options.append('--template %s' % (template_node.abspath(env)))
+ inputs.append(template_node)
+ params = {'file-head' : '--fhead',
+ 'file-prod' : '--fprod',
+ 'file-tail' : '--ftail',
+ 'enum-prod' : '--eprod',
+ 'value-head' : '--vhead',
+ 'value-prod' : '--vprod',
+ 'value-tail' : '--vtail',
+ 'comments': '--comments'}
+ for param, option in params.iteritems():
+ if enum[param]:
+ options.append('%s %r' % (option, enum[param]))
+
+ env['GLIB_MKENUMS_OPTIONS'] = ' '.join(options)
+
+ # update the task instance
+ task.set_inputs(inputs)
+ task.set_outputs(tgt_node)
+
+Task.task_type_from_func('glib_genmarshal', func=genmarshal_func, vars=['GLIB_GENMARSHAL_PREFIX', 'GLIB_GENMARSHAL'],
+ color='BLUE', before='cc cxx')
+Task.simple_task_type('glib_mkenums',
+ '${GLIB_MKENUMS} ${GLIB_MKENUMS_OPTIONS} ${GLIB_MKENUMS_SOURCE} > ${GLIB_MKENUMS_TARGET}',
+ color='PINK', before='cc cxx')
+
+def detect(conf):
+ glib_genmarshal = conf.find_program('glib-genmarshal', var='GLIB_GENMARSHAL')
+ mk_enums_tool = conf.find_program('glib-mkenums', var='GLIB_MKENUMS')
diff --git a/third_party/waf/wafadmin/Tools/gnome.py b/third_party/waf/wafadmin/Tools/gnome.py
new file mode 100644
index 0000000..da11e91
--- /dev/null
+++ b/third_party/waf/wafadmin/Tools/gnome.py
@@ -0,0 +1,222 @@
+#!/usr/bin/env python
+# encoding: utf-8
+# Thomas Nagy, 2006-2008 (ita)
+
+"Gnome support"
+
+import os, re
+import TaskGen, Utils, Runner, Task, Build, Options, Logs
+import cc
+from Logs import error
+from TaskGen import taskgen, before, after, feature
+
+n1_regexp = re.compile('<refentrytitle>(.*)</refentrytitle>', re.M)
+n2_regexp = re.compile('<manvolnum>(.*)</manvolnum>', re.M)
+
+def postinstall_schemas(prog_name):
+ if Build.bld.is_install:
+ dir = Build.bld.get_install_path('${PREFIX}/etc/gconf/schemas/%s.schemas' % prog_name)
+ if not Options.options.destdir:
+ # add the gconf schema
+ Utils.pprint('YELLOW', 'Installing GConf schema')
+ command = 'gconftool-2 --install-schema-file=%s 1> /dev/null' % dir
+ ret = Utils.exec_command(command)
+ else:
+ Utils.pprint('YELLOW', 'GConf schema not installed. After install, run this:')
+ Utils.pprint('YELLOW', 'gconftool-2 --install-schema-file=%s' % dir)
+
+def postinstall_icons():
+ dir = Build.bld.get_install_path('${DATADIR}/icons/hicolor')
+ if Build.bld.is_install:
+ if not Options.options.destdir:
+ # update the pixmap cache directory
+ Utils.pprint('YELLOW', "Updating Gtk icon cache.")
+ command = 'gtk-update-icon-cache -q -f -t %s' % dir
+ ret = Utils.exec_command(command)
+ else:
+ Utils.pprint('YELLOW', 'Icon cache not updated. After install, run this:')
+ Utils.pprint('YELLOW', 'gtk-update-icon-cache -q -f -t %s' % dir)
+
+def postinstall_scrollkeeper(prog_name):
+ if Build.bld.is_install:
+ # now the scrollkeeper update if we can write to the log file
+ if os.access('/var/log/scrollkeeper.log', os.W_OK):
+ dir1 = Build.bld.get_install_path('${PREFIX}/var/scrollkeeper')
+ dir2 = Build.bld.get_install_path('${DATADIR}/omf/%s' % prog_name)
+ command = 'scrollkeeper-update -q -p %s -o %s' % (dir1, dir2)
+ ret = Utils.exec_command(command)
+
+def postinstall(prog_name='myapp', schemas=1, icons=1, scrollkeeper=1):
+ if schemas: postinstall_schemas(prog_name)
+ if icons: postinstall_icons()
+ if scrollkeeper: postinstall_scrollkeeper(prog_name)
+
+# OBSOLETE
+class gnome_doc_taskgen(TaskGen.task_gen):
+ def __init__(self, *k, **kw):
+ TaskGen.task_gen.__init__(self, *k, **kw)
+
+@feature('gnome_doc')
+def init_gnome_doc(self):
+ self.default_install_path = '${PREFIX}/share'
+
+@feature('gnome_doc')
+@after('init_gnome_doc')
+def apply_gnome_doc(self):
+ self.env['APPNAME'] = self.doc_module
+ lst = self.to_list(self.doc_linguas)
+ bld = self.bld
+ lst.append('C')
+
+ for x in lst:
+ if not x == 'C':
+ tsk = self.create_task('xml2po')
+ node = self.path.find_resource(x+'/'+x+'.po')
+ src = self.path.find_resource('C/%s.xml' % self.doc_module)
+ out = self.path.find_or_declare('%s/%s.xml' % (x, self.doc_module))
+ tsk.set_inputs([node, src])
+ tsk.set_outputs(out)
+ else:
+ out = self.path.find_resource('%s/%s.xml' % (x, self.doc_module))
+
+ tsk2 = self.create_task('xsltproc2po')
+ out2 = self.path.find_or_declare('%s/%s-%s.omf' % (x, self.doc_module, x))
+ tsk2.set_outputs(out2)
+ node = self.path.find_resource(self.doc_module+".omf.in")
+ tsk2.inputs = [node, out]
+
+ tsk2.run_after.append(tsk)
+
+ if bld.is_install:
+ path = self.install_path + '/gnome/help/%s/%s' % (self.doc_module, x)
+ bld.install_files(self.install_path + '/omf', out2, env=self.env)
+ for y in self.to_list(self.doc_figures):
+ try:
+ os.stat(self.path.abspath() + '/' + x + '/' + y)
+ bld.install_as(path + '/' + y, self.path.abspath() + '/' + x + '/' + y)
+ except:
+ bld.install_as(path + '/' + y, self.path.abspath() + '/C/' + y)
+ bld.install_as(path + '/%s.xml' % self.doc_module, out.abspath(self.env))
+ if x == 'C':
+ xmls = self.to_list(self.doc_includes)
+ xmls.append(self.doc_entities)
+ for z in xmls:
+ out = self.path.find_resource('%s/%s' % (x, z))
+ bld.install_as(path + '/%s' % z, out.abspath(self.env))
+
+# OBSOLETE
+class xml_to_taskgen(TaskGen.task_gen):
+ def __init__(self, *k, **kw):
+ TaskGen.task_gen.__init__(self, *k, **kw)
+
+@feature('xml_to')
+def init_xml_to(self):
+ Utils.def_attrs(self,
+ source = 'xmlfile',
+ xslt = 'xsltfile',
+ target = 'hey',
+ default_install_path = '${PREFIX}',
+ task_created = None)
+
+@feature('xml_to')
+@after('init_xml_to')
+def apply_xml_to(self):
+ xmlfile = self.path.find_resource(self.source)
+ xsltfile = self.path.find_resource(self.xslt)
+ tsk = self.create_task('xmlto', [xmlfile, xsltfile], xmlfile.change_ext('.html')) # change_ext replaces from the last dot, so the leading '.' is needed
+ tsk.install_path = self.install_path
+
+def sgml_scan(self):
+ node = self.inputs[0]
+
+ env = self.env
+ variant = node.variant(env)
+
+ fi = open(node.abspath(env), 'r')
+ content = fi.read()
+ fi.close()
+
+ # we should use a sgml parser :-/
+ name = n1_regexp.findall(content)[0]
+ num = n2_regexp.findall(content)[0]
+
+ doc_name = name+'.'+num
+
+ if not self.outputs:
+ self.outputs = [self.generator.path.find_or_declare(doc_name)]
+
+ return ([], [doc_name])
+
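+# Example (illustrative): a docbook source containing
+#   <refentrytitle>foo</refentrytitle> ... <manvolnum>1</manvolnum>
+# makes the scanner declare 'foo.1' as the output man page.
+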
+class gnome_sgml2man_taskgen(TaskGen.task_gen):
+ def __init__(self, *k, **kw):
+ TaskGen.task_gen.__init__(self, *k, **kw)
+
+@feature('gnome_sgml2man')
+def apply_gnome_sgml2man(self):
+ """
+ we could make it more complicated, but for now we just scan the document each time
+ """
+ assert(getattr(self, 'appname', None))
+
+ def install_result(task):
+ out = task.outputs[0]
+ name = out.name
+ ext = name[-1]
+ env = task.env
+ self.bld.install_files('${DATADIR}/man/man%s/' % ext, out, env)
+
+ self.bld.rescan(self.path)
+ for name in self.bld.cache_dir_contents[self.path.id]:
+ base, ext = os.path.splitext(name)
+ if ext != '.sgml': continue
+
+ task = self.create_task('sgml2man')
+ task.set_inputs(self.path.find_resource(name))
+ task.task_generator = self
+ if self.bld.is_install: task.install = install_result
+ # no outputs, the scanner does it
+ # no caching for now, this is not a time-critical feature
+ # in the future the scanner can be used to do more things (find dependencies, etc)
+ task.scan()
+
+cls = Task.simple_task_type('sgml2man', '${SGML2MAN} -o ${TGT[0].bld_dir(env)} ${SRC} > /dev/null', color='BLUE')
+cls.scan = sgml_scan
+cls.quiet = 1
+
+Task.simple_task_type('xmlto', '${XMLTO} html -m ${SRC[1].abspath(env)} ${SRC[0].abspath(env)}')
+
+Task.simple_task_type('xml2po', '${XML2PO} ${XML2POFLAGS} ${SRC} > ${TGT}', color='BLUE')
+
+# how do you expect someone to understand this?!
+xslt_magic = """${XSLTPROC2PO} -o ${TGT[0].abspath(env)} \
+--stringparam db2omf.basename ${APPNAME} \
+--stringparam db2omf.format docbook \
+--stringparam db2omf.lang ${TGT[0].abspath(env)[:-4].split('-')[-1]} \
+--stringparam db2omf.dtd '-//OASIS//DTD DocBook XML V4.3//EN' \
+--stringparam db2omf.omf_dir ${PREFIX}/share/omf \
+--stringparam db2omf.help_dir ${PREFIX}/share/gnome/help \
+--stringparam db2omf.omf_in ${SRC[0].abspath(env)} \
+--stringparam db2omf.scrollkeeper_cl ${SCROLLKEEPER_DATADIR}/Templates/C/scrollkeeper_cl.xml \
+${DB2OMF} ${SRC[1].abspath(env)}"""
+
+#--stringparam db2omf.dtd '-//OASIS//DTD DocBook XML V4.3//EN' \
+Task.simple_task_type('xsltproc2po', xslt_magic, color='BLUE')
+
+def detect(conf):
+ conf.check_tool('gnu_dirs glib2 dbus')
+ sgml2man = conf.find_program('docbook2man', var='SGML2MAN')
+
+ def getstr(varname):
+ return getattr(Options.options, varname, '')
+
+ # conf.define also stores the variable in the env
+ conf.define('GNOMELOCALEDIR', os.path.join(conf.env['DATADIR'], 'locale'))
+
+ xml2po = conf.find_program('xml2po', var='XML2PO')
+ xsltproc2po = conf.find_program('xsltproc', var='XSLTPROC2PO')
+ conf.env['XML2POFLAGS'] = '-e -p'
+ conf.env['SCROLLKEEPER_DATADIR'] = Utils.cmd_output("scrollkeeper-config --pkgdatadir", silent=1).strip()
+ conf.env['DB2OMF'] = Utils.cmd_output("/usr/bin/pkg-config --variable db2omf gnome-doc-utils", silent=1).strip()
+
+def set_options(opt):
+ opt.add_option('--want-rpath', type='int', default=1, dest='want_rpath', help='set rpath to 1 or 0 [Default 1]')
diff --git a/third_party/waf/wafadmin/Tools/gnu_dirs.py b/third_party/waf/wafadmin/Tools/gnu_dirs.py
new file mode 100644
index 0000000..ac149df
--- /dev/null
+++ b/third_party/waf/wafadmin/Tools/gnu_dirs.py
@@ -0,0 +1,110 @@
+#!/usr/bin/env python
+# encoding: utf-8
+# Ali Sabil, 2007
+
+"""
+To use this module do not forget to call
+opt.tool_options('gnu_dirs')
+AND
+conf.check_tool('gnu_dirs')
+
+Add options for the standard GNU directories; this tool adds the options
+found in autotools and updates the environment with the following
+installation variables:
+
+ * PREFIX : architecture-independent files [/usr/local]
+ * EXEC_PREFIX : architecture-dependent files [PREFIX]
+ * BINDIR : user executables [EXEC_PREFIX/bin]
+ * SBINDIR : user executables [EXEC_PREFIX/sbin]
+ * LIBEXECDIR : program executables [EXEC_PREFIX/libexec]
+ * SYSCONFDIR : read-only single-machine data [PREFIX/etc]
+ * SHAREDSTATEDIR : modifiable architecture-independent data [PREFIX/com]
+ * LOCALSTATEDIR : modifiable single-machine data [PREFIX/var]
+ * LIBDIR : object code libraries [EXEC_PREFIX/lib]
+ * INCLUDEDIR : C header files [PREFIX/include]
+ * OLDINCLUDEDIR : C header files for non-gcc [/usr/include]
+ * DATAROOTDIR : read-only arch.-independent data root [PREFIX/share]
+ * DATADIR : read-only architecture-independent data [DATAROOTDIR]
+ * INFODIR : info documentation [DATAROOTDIR/info]
+ * LOCALEDIR : locale-dependent data [DATAROOTDIR/locale]
+ * MANDIR : man documentation [DATAROOTDIR/man]
+ * DOCDIR : documentation root [DATAROOTDIR/doc/PACKAGE]
+ * HTMLDIR : html documentation [DOCDIR]
+ * DVIDIR : dvi documentation [DOCDIR]
+ * PDFDIR : pdf documentation [DOCDIR]
+ * PSDIR : ps documentation [DOCDIR]
+"""
+
+import Utils, Options
+
+_options = [x.split(', ') for x in '''
+bindir, user executables, ${EXEC_PREFIX}/bin
+sbindir, system admin executables, ${EXEC_PREFIX}/sbin
+libexecdir, program executables, ${EXEC_PREFIX}/libexec
+sysconfdir, read-only single-machine data, ${PREFIX}/etc
+sharedstatedir, modifiable architecture-independent data, ${PREFIX}/com
+localstatedir, modifiable single-machine data, ${PREFIX}/var
+libdir, object code libraries, ${EXEC_PREFIX}/lib
+includedir, C header files, ${PREFIX}/include
+oldincludedir, C header files for non-gcc, /usr/include
+datarootdir, read-only arch.-independent data root, ${PREFIX}/share
+datadir, read-only architecture-independent data, ${DATAROOTDIR}
+infodir, info documentation, ${DATAROOTDIR}/info
+localedir, locale-dependent data, ${DATAROOTDIR}/locale
+mandir, man documentation, ${DATAROOTDIR}/man
+docdir, documentation root, ${DATAROOTDIR}/doc/${PACKAGE}
+htmldir, html documentation, ${DOCDIR}
+dvidir, dvi documentation, ${DOCDIR}
+pdfdir, pdf documentation, ${DOCDIR}
+psdir, ps documentation, ${DOCDIR}
+'''.split('\n') if x]
+
+def detect(conf):
+ def get_param(varname, default):
+ return getattr(Options.options, varname, '') or default
+
+ env = conf.env
+ env['EXEC_PREFIX'] = get_param('EXEC_PREFIX', env['PREFIX'])
+ env['PACKAGE'] = Utils.g_module.APPNAME
+
+ complete = False
+ iter = 0
+ while not complete and iter < len(_options) + 1:
+ iter += 1
+ complete = True
+ for name, help, default in _options:
+ name = name.upper()
+ if not env[name]:
+ try:
+ env[name] = Utils.subst_vars(get_param(name, default), env)
+ except TypeError:
+ complete = False
+ if not complete:
+ lst = [name for name, _, _ in _options if not env[name.upper()]]
+ raise Utils.WafError('Variable substitution failure %r' % lst)
+
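+# Example (illustrative): with the default PREFIX of /usr/local and no
+# options given, the substitution loop resolves LIBDIR to /usr/local/lib
+# and DOCDIR to /usr/local/share/doc/<APPNAME> after a few passes.
+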
+def set_options(opt):
+
+ inst_dir = opt.add_option_group('Installation directories',
+'By default, "waf install" will put the files in\
+ "/usr/local/bin", "/usr/local/lib" etc. An installation prefix other\
+ than "/usr/local" can be given using "--prefix", for example "--prefix=$HOME"')
+
+ for k in ('--prefix', '--destdir'):
+ option = opt.parser.get_option(k)
+ if option:
+ opt.parser.remove_option(k)
+ inst_dir.add_option(option)
+
+ inst_dir.add_option('--exec-prefix',
+ help = 'installation prefix [Default: ${PREFIX}]',
+ default = '',
+ dest = 'EXEC_PREFIX')
+
+ dirs_options = opt.add_option_group('Pre-defined installation directories', '')
+
+ for name, help, default in _options:
+ option_name = '--' + name
+ str_default = default
+ str_help = '%s [Default: %s]' % (help, str_default)
+ dirs_options.add_option(option_name, help=str_help, default='', dest=name.upper())
diff --git a/third_party/waf/wafadmin/Tools/gob2.py b/third_party/waf/wafadmin/Tools/gob2.py
new file mode 100644
index 0000000..96d8e20
--- /dev/null
+++ b/third_party/waf/wafadmin/Tools/gob2.py
@@ -0,0 +1,17 @@
+#!/usr/bin/env python
+# encoding: utf-8
+# Ali Sabil, 2007
+
+import TaskGen
+
+TaskGen.declare_chain(
+ name = 'gob2',
+ rule = '${GOB2} -o ${TGT[0].bld_dir(env)} ${GOB2FLAGS} ${SRC}',
+ ext_in = '.gob',
+ ext_out = '.c'
+)
+
+def detect(conf):
+ gob2 = conf.find_program('gob2', var='GOB2', mandatory=True)
+ conf.env['GOB2'] = gob2
+ conf.env['GOB2FLAGS'] = ''
diff --git a/third_party/waf/wafadmin/Tools/gxx.py b/third_party/waf/wafadmin/Tools/gxx.py
new file mode 100644
index 0000000..38e8d00
--- /dev/null
+++ b/third_party/waf/wafadmin/Tools/gxx.py
@@ -0,0 +1,136 @@
+#!/usr/bin/env python
+# encoding: utf-8
+# Thomas Nagy, 2006 (ita)
+# Ralf Habacker, 2006 (rh)
+# Yinon Ehrlich, 2009
+
+import os, sys
+import Configure, Options, Utils
+import ccroot, ar
+from Configure import conftest
+
+@conftest
+def find_gxx(conf):
+ cxx = conf.find_program(['g++', 'c++'], var='CXX', mandatory=True)
+ cxx = conf.cmd_to_list(cxx)
+ ccroot.get_cc_version(conf, cxx, gcc=True)
+ conf.env.CXX_NAME = 'gcc'
+ conf.env.CXX = cxx
+
+@conftest
+def gxx_common_flags(conf):
+ v = conf.env
+
+ # CPPFLAGS CXXDEFINES _CXXINCFLAGS _CXXDEFFLAGS
+ v['CXXFLAGS_DEBUG'] = ['-g']
+ v['CXXFLAGS_RELEASE'] = ['-O2']
+
+ v['CXX_SRC_F'] = ''
+ v['CXX_TGT_F'] = ['-c', '-o', ''] # shell hack for -MD
+ v['CPPPATH_ST'] = '-I%s' # template for adding include paths
+
+ # linker
+ if not v['LINK_CXX']: v['LINK_CXX'] = v['CXX']
+ v['CXXLNK_SRC_F'] = ''
+ v['CXXLNK_TGT_F'] = ['-o', ''] # shell hack for -MD
+
+ v['LIB_ST'] = '-l%s' # template for adding libs
+ v['LIBPATH_ST'] = '-L%s' # template for adding libpaths
+ v['STATICLIB_ST'] = '-l%s'
+ v['STATICLIBPATH_ST'] = '-L%s'
+ v['RPATH_ST'] = '-Wl,-rpath,%s'
+ v['CXXDEFINES_ST'] = '-D%s'
+
+ v['SONAME_ST'] = '-Wl,-h,%s'
+ v['SHLIB_MARKER'] = '-Wl,-Bdynamic'
+ v['STATICLIB_MARKER'] = '-Wl,-Bstatic'
+ v['FULLSTATIC_MARKER'] = '-static'
+
+ # program
+ v['program_PATTERN'] = '%s'
+
+ # shared library
+ v['shlib_CXXFLAGS'] = ['-fPIC', '-DPIC'] # avoid using -DPIC, -fPIC already defines the __PIC__ macro
+ v['shlib_LINKFLAGS'] = ['-shared']
+ v['shlib_PATTERN'] = 'lib%s.so'
+
+ # static lib
+ v['staticlib_LINKFLAGS'] = ['-Wl,-Bstatic']
+ v['staticlib_PATTERN'] = 'lib%s.a'
+
+ # osx stuff
+ v['LINKFLAGS_MACBUNDLE'] = ['-bundle', '-undefined', 'dynamic_lookup']
+ v['CCFLAGS_MACBUNDLE'] = ['-fPIC']
+ v['macbundle_PATTERN'] = '%s.bundle'
+
+@conftest
+def gxx_modifier_win32(conf):
+ v = conf.env
+ v['program_PATTERN'] = '%s.exe'
+
+ v['shlib_PATTERN'] = '%s.dll'
+ v['implib_PATTERN'] = 'lib%s.dll.a'
+ v['IMPLIB_ST'] = '-Wl,--out-implib,%s'
+
+ dest_arch = v['DEST_CPU']
+ v['shlib_CXXFLAGS'] = []
+
+ v.append_value('shlib_CXXFLAGS', '-DDLL_EXPORT') # TODO adding nonstandard defines like this DLL_EXPORT is not a good idea
+
+ # Auto-import is enabled by default even without this option,
+ # but enabling it explicitly has the nice effect of suppressing the rather boring, debug-level messages
+ # that the linker emits otherwise.
+ v.append_value('LINKFLAGS', '-Wl,--enable-auto-import')
+
+@conftest
+def gxx_modifier_cygwin(conf):
+ gxx_modifier_win32(conf)
+ v = conf.env
+ v['shlib_PATTERN'] = 'cyg%s.dll'
+ v.append_value('shlib_LINKFLAGS', '-Wl,--enable-auto-image-base')
+
+@conftest
+def gxx_modifier_darwin(conf):
+ v = conf.env
+ v['shlib_CXXFLAGS'] = ['-fPIC', '-compatibility_version', '1', '-current_version', '1']
+ v['shlib_LINKFLAGS'] = ['-dynamiclib']
+ v['shlib_PATTERN'] = 'lib%s.dylib'
+
+ v['staticlib_LINKFLAGS'] = []
+
+ v['SHLIB_MARKER'] = ''
+ v['STATICLIB_MARKER'] = ''
+ v['SONAME_ST'] = ''
+
+@conftest
+def gxx_modifier_aix(conf):
+ v = conf.env
+ v['program_LINKFLAGS'] = ['-Wl,-brtl']
+
+ v['shlib_LINKFLAGS'] = ['-shared', '-Wl,-brtl,-bexpfull']
+
+ v['SHLIB_MARKER'] = ''
+
+@conftest
+def gxx_modifier_openbsd(conf):
+ conf.env['SONAME_ST'] = []
+
+@conftest
+def gxx_modifier_platform(conf):
+ # * set configuration options specific to a platform.
+ # * the destination platform is detected automatically by looking at the macros the compiler predefines,
+ # and if it is not recognised, it falls back to sys.platform.
+ dest_os = conf.env['DEST_OS'] or Utils.unversioned_sys_platform()
+ gxx_modifier_func = globals().get('gxx_modifier_' + dest_os)
+ if gxx_modifier_func:
+ gxx_modifier_func(conf)
+
+def detect(conf):
+ conf.find_gxx()
+ conf.find_cpp()
+ conf.find_ar()
+ conf.gxx_common_flags()
+ conf.gxx_modifier_platform()
+ conf.cxx_load_tools()
+ conf.cxx_add_flags()
+ conf.link_add_flags()
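+
+# Loading sketch (an assumption mirroring how the other compiler tools in
+# this directory are used): detect() runs when the tool is loaded during
+# configuration, e.g. from a hypothetical wscript:
+#
+# def configure(conf):
+#     conf.check_tool('gxx')
+#     conf.env.append_value('CXXFLAGS', ['-O2', '-Wall'])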
diff --git a/third_party/waf/wafadmin/Tools/icc.py b/third_party/waf/wafadmin/Tools/icc.py
new file mode 100644
index 0000000..9c9a926
--- /dev/null
+++ b/third_party/waf/wafadmin/Tools/icc.py
@@ -0,0 +1,37 @@
+#!/usr/bin/env python
+# encoding: utf-8
+# Stian Selnes, 2008
+# Thomas Nagy 2009
+
+import os, sys
+import Configure, Options, Utils
+import ccroot, ar, gcc
+from Configure import conftest
+
+@conftest
+def find_icc(conf):
+ if sys.platform == 'cygwin':
+ conf.fatal('The Intel compiler does not work on Cygwin')
+
+ v = conf.env
+ cc = None
+ if v['CC']: cc = v['CC']
+ elif 'CC' in conf.environ: cc = conf.environ['CC']
+ if not cc: cc = conf.find_program('icc', var='CC')
+ if not cc: cc = conf.find_program('ICL', var='CC')
+ if not cc: conf.fatal('Intel C Compiler (icc) was not found')
+ cc = conf.cmd_to_list(cc)
+
+ ccroot.get_cc_version(conf, cc, icc=True)
+ v['CC'] = cc
+ v['CC_NAME'] = 'icc'
+
+detect = '''
+find_icc
+find_ar
+gcc_common_flags
+gcc_modifier_platform
+cc_load_tools
+cc_add_flags
+link_add_flags
+'''
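+
+# Note: detect here is a string of conftest names rather than a function;
+# the configuration framework evaluates each name in order. A sketch of
+# the equivalent explicit form, for illustration only:
+#
+# def detect_as_function(conf):
+#     conf.eval_rules('find_icc find_ar gcc_common_flags '
+#                     'gcc_modifier_platform cc_load_tools cc_add_flags '
+#                     'link_add_flags')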
diff --git a/third_party/waf/wafadmin/Tools/icpc.py b/third_party/waf/wafadmin/Tools/icpc.py
new file mode 100644
index 0000000..7d79c57
--- /dev/null
+++ b/third_party/waf/wafadmin/Tools/icpc.py
@@ -0,0 +1,35 @@
+#!/usr/bin/env python
+# encoding: utf-8
+# Thomas Nagy 2009
+
+import os, sys
+import Configure, Options, Utils
+import ccroot, ar, gxx
+from Configure import conftest
+
+@conftest
+def find_icpc(conf):
+ if sys.platform == 'cygwin':
+ conf.fatal('The Intel compiler does not work on Cygwin')
+
+ v = conf.env
+ cxx = None
+ if v['CXX']: cxx = v['CXX']
+ elif 'CXX' in conf.environ: cxx = conf.environ['CXX']
+ if not cxx: cxx = conf.find_program('icpc', var='CXX')
+ if not cxx: conf.fatal('Intel C++ Compiler (icpc) was not found')
+ cxx = conf.cmd_to_list(cxx)
+
+ ccroot.get_cc_version(conf, cxx, icc=True)
+ v['CXX'] = cxx
+ v['CXX_NAME'] = 'icc'
+
+detect = '''
+find_icpc
+find_ar
+gxx_common_flags
+gxx_modifier_platform
+cxx_load_tools
+cxx_add_flags
+link_add_flags
+'''
diff --git a/third_party/waf/wafadmin/Tools/intltool.py b/third_party/waf/wafadmin/Tools/intltool.py
new file mode 100644
index 0000000..5fb3df2
--- /dev/null
+++ b/third_party/waf/wafadmin/Tools/intltool.py
@@ -0,0 +1,138 @@
+#!/usr/bin/env python
+# encoding: utf-8
+# Thomas Nagy, 2006 (ita)
+
+"intltool support"
+
+import os, re
+import Configure, TaskGen, Task, Utils, Runner, Options, Build, config_c
+from TaskGen import feature, before, taskgen
+from Logs import error
+
+"""
+Usage:
+
+bld(features='intltool_in', source='a.po b.po', podir='po', cache='.intlcache', flags='')
+
+"""
+
+class intltool_in_taskgen(TaskGen.task_gen):
+ """deprecated"""
+ def __init__(self, *k, **kw):
+ TaskGen.task_gen.__init__(self, *k, **kw)
+
+@before('apply_core')
+@feature('intltool_in')
+def iapply_intltool_in_f(self):
+ try: self.meths.remove('apply_core')
+ except ValueError: pass
+
+ for i in self.to_list(self.source):
+ node = self.path.find_resource(i)
+
+ podir = getattr(self, 'podir', 'po')
+ podirnode = self.path.find_dir(podir)
+ if not podirnode:
+ error("could not find the podir %r" % podir)
+ continue
+
+ cache = getattr(self, 'intlcache', '.intlcache')
+ self.env['INTLCACHE'] = os.path.join(self.path.bldpath(self.env), podir, cache)
+ self.env['INTLPODIR'] = podirnode.srcpath(self.env)
+ self.env['INTLFLAGS'] = getattr(self, 'flags', ['-q', '-u', '-c'])
+
+ task = self.create_task('intltool', node, node.change_ext(''))
+ task.install_path = self.install_path
+
+class intltool_po_taskgen(TaskGen.task_gen):
+ """deprecated"""
+ def __init__(self, *k, **kw):
+ TaskGen.task_gen.__init__(self, *k, **kw)
+
+
+@feature('intltool_po')
+def apply_intltool_po(self):
+ try: self.meths.remove('apply_core')
+ except ValueError: pass
+
+ self.default_install_path = '${LOCALEDIR}'
+ appname = getattr(self, 'appname', 'set_your_app_name')
+ podir = getattr(self, 'podir', '')
+
+ def install_translation(task):
+ out = task.outputs[0]
+ filename = out.name
+ (langname, ext) = os.path.splitext(filename)
+ inst_file = langname + os.sep + 'LC_MESSAGES' + os.sep + appname + '.mo'
+ self.bld.install_as(os.path.join(self.install_path, inst_file), out, self.env, self.chmod)
+
+ linguas = self.path.find_resource(os.path.join(podir, 'LINGUAS'))
+ if linguas:
+ # scan LINGUAS file for locales to process
+ file = open(linguas.abspath())
+ langs = []
+ for line in file.readlines():
+ # skip comment lines (those starting with '#')
+ if not line.startswith('#'):
+ langs += line.split()
+ file.close()
+ re_linguas = re.compile('[-a-zA-Z_@.]+')
+ for lang in langs:
+ # Make sure that we only process lines which contain locales
+ if re_linguas.match(lang):
+ node = self.path.find_resource(os.path.join(podir, re_linguas.match(lang).group() + '.po'))
+ task = self.create_task('po')
+ task.set_inputs(node)
+ task.set_outputs(node.change_ext('.mo'))
+ if self.bld.is_install: task.install = install_translation
+ else:
+ Utils.pprint('RED', "Error: no LINGUAS file found in the po directory")
+
+Task.simple_task_type('po', '${POCOM} -o ${TGT} ${SRC}', color='BLUE', shell=False)
+Task.simple_task_type('intltool',
+ '${INTLTOOL} ${INTLFLAGS} ${INTLCACHE} ${INTLPODIR} ${SRC} ${TGT}',
+ color='BLUE', after="cc_link cxx_link", shell=False)
+
+def detect(conf):
+ pocom = conf.find_program('msgfmt')
+ if not pocom:
+ # if msgfmt should not be mandatory, catch the thrown exception in your wscript
+ conf.fatal('The program msgfmt (gettext) is mandatory!')
+ conf.env['POCOM'] = pocom
+
+ # NOTE: it is possible to set INTLTOOL in the environment, but it must not have spaces in it
+
+ intltool = conf.find_program('intltool-merge', var='INTLTOOL')
+ if not intltool:
+ # if intltool-merge should not be mandatory, catch the thrown exception in your wscript
+ if Options.platform == 'win32':
+ perl = conf.find_program('perl', var='PERL')
+ if not perl:
+ conf.fatal('The program perl (required by intltool) could not be found')
+
+ intltooldir = Configure.find_file('intltool-merge', os.environ['PATH'].split(os.pathsep))
+ if not intltooldir:
+ conf.fatal('The program intltool-merge (intltool, gettext-devel) is mandatory!')
+
+ conf.env['INTLTOOL'] = Utils.to_list(conf.env['PERL']) + [intltooldir + os.sep + 'intltool-merge']
+ conf.check_message('intltool', '', True, ' '.join(conf.env['INTLTOOL']))
+ else:
+ conf.fatal('The program intltool-merge (intltool, gettext-devel) is mandatory!')
+
+ def getstr(varname):
+ return getattr(Options.options, varname, '')
+
+ prefix = conf.env['PREFIX']
+ datadir = getstr('datadir')
+ if not datadir: datadir = os.path.join(prefix,'share')
+
+ conf.define('LOCALEDIR', os.path.join(datadir, 'locale'))
+ conf.define('DATADIR', datadir)
+
+ if conf.env['CC'] or conf.env['CXX']:
+ # Define to 1 if <locale.h> is present
+ conf.check(header_name='locale.h')
+
+def set_options(opt):
+ opt.add_option('--want-rpath', type='int', default=1, dest='want_rpath', help='set rpath to 1 or 0 [Default 1]')
+ opt.add_option('--datadir', type='string', default='', dest='datadir', help='read-only application data')
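+
+# Usage sketch (an assumption based on the attributes read above): a po
+# directory containing a LINGUAS file plus fr.po, de.po, ... could be
+# compiled and installed with a hypothetical wscript fragment such as:
+#
+# def build(bld):
+#     bld(features='intltool_po', appname='myapp', podir='po')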
diff --git a/third_party/waf/wafadmin/Tools/javaw.py b/third_party/waf/wafadmin/Tools/javaw.py
new file mode 100644
index 0000000..4d9f4c7
--- /dev/null
+++ b/third_party/waf/wafadmin/Tools/javaw.py
@@ -0,0 +1,254 @@
+#!/usr/bin/env python
+# encoding: utf-8
+# Thomas Nagy, 2006-2008 (ita)
+
+"""
+Java support
+
+Javac is one of the few compilers that behaves very badly:
+* it outputs files where it wants to (-d is only for the package root)
+* it recompiles files silently behind your back
+* it outputs an undefined amount of files (inner classes)
+
+Fortunately, the convention makes it possible to use the build dir without
+too many problems for the moment
+
+Inner classes must be located and cleaned when a problem arises;
+for the moment waf does not track the production of inner classes.
+
+Adding all the files to a task and executing it whenever any of the input
+files change would only hurt compilation times
+
+Compilation can be run using Jython[1] rather than regular Python. Instead of
+running one of the following commands:
+ ./waf configure
+ python waf configure
+You would have to run:
+ java -jar /path/to/jython.jar waf configure
+
+[1] http://www.jython.org/
+"""
+
+import os, re
+from Configure import conf
+import TaskGen, Task, Utils, Options, Build
+from TaskGen import feature, before, taskgen
+
+class_check_source = '''
+public class Test {
+ public static void main(String[] argv) {
+ Class lib;
+ if (argv.length < 1) {
+ System.err.println("Missing argument");
+ System.exit(77);
+ }
+ try {
+ lib = Class.forName(argv[0]);
+ } catch (ClassNotFoundException e) {
+ System.err.println("ClassNotFoundException");
+ System.exit(1);
+ }
+ lib = null;
+ System.exit(0);
+ }
+}
+'''
+
+@feature('jar')
+@before('apply_core')
+def jar_files(self):
+ basedir = getattr(self, 'basedir', '.')
+ destfile = getattr(self, 'destfile', 'test.jar')
+ jaropts = getattr(self, 'jaropts', [])
+ jarcreate = getattr(self, 'jarcreate', 'cf')
+
+ dir = self.path.find_dir(basedir)
+ if not dir: raise Utils.WafError('could not find the basedir %r' % basedir)
+
+ jaropts.append('-C')
+ jaropts.append(dir.abspath(self.env))
+ jaropts.append('.')
+
+ out = self.path.find_or_declare(destfile)
+
+ tsk = self.create_task('jar_create')
+ tsk.set_outputs(out)
+ tsk.inputs = [x for x in dir.find_iter(src=0, bld=1) if x.id != out.id]
+ tsk.env['JAROPTS'] = jaropts
+ tsk.env['JARCREATE'] = jarcreate
+
+@feature('javac')
+@before('apply_core')
+def apply_java(self):
+ Utils.def_attrs(self, jarname='', jaropts='', classpath='',
+ sourcepath='.', srcdir='.', source_re='**/*.java',
+ jar_mf_attributes={}, jar_mf_classpath=[])
+
+ if getattr(self, 'source_root', None):
+ # old stuff
+ self.srcdir = self.source_root
+
+
+ nodes_lst = []
+
+ if not self.classpath:
+ if not self.env['CLASSPATH']:
+ self.env['CLASSPATH'] = '..' + os.pathsep + '.'
+ else:
+ self.env['CLASSPATH'] = self.classpath
+
+ srcdir_node = self.path.find_dir(self.srcdir)
+ if not srcdir_node:
+ raise Utils.WafError('could not find srcdir %r' % self.srcdir)
+
+ src_nodes = [x for x in srcdir_node.ant_glob(self.source_re, flat=False)]
+ bld_nodes = [x.change_ext('.class') for x in src_nodes]
+
+ self.env['OUTDIR'] = [srcdir_node.bldpath(self.env)]
+
+ tsk = self.create_task('javac')
+ tsk.set_inputs(src_nodes)
+ tsk.set_outputs(bld_nodes)
+
+ if getattr(self, 'compat', None):
+ tsk.env.append_value('JAVACFLAGS', ['-source', self.compat])
+
+ if hasattr(self, 'sourcepath'):
+ fold = [self.path.find_dir(x) for x in self.to_list(self.sourcepath)]
+ names = os.pathsep.join([x.srcpath() for x in fold])
+ else:
+ names = srcdir_node.srcpath()
+
+ if names:
+ tsk.env.append_value('JAVACFLAGS', ['-sourcepath', names])
+
+ if self.jarname:
+ jtsk = self.create_task('jar_create', bld_nodes, self.path.find_or_declare(self.jarname))
+ jtsk.set_run_after(tsk)
+
+ if not self.env.JAROPTS:
+ if self.jaropts:
+ self.env.JAROPTS = self.jaropts
+ else:
+ dirs = '.'
+ self.env.JAROPTS = ['-C', ''.join(self.env['OUTDIR']), dirs]
+
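+# Usage sketch (an assumption inferred from the attributes read above):
+# compiling a tree of .java files and packing the classes into a jar
+# might look like this hypothetical wscript fragment:
+#
+# def build(bld):
+#     bld(features='javac', srcdir='src', compat='1.6',
+#         classpath='lib/deps.jar', jarname='app.jar')
+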
+Task.simple_task_type('jar_create', '${JAR} ${JARCREATE} ${TGT} ${JAROPTS}', color='GREEN', shell=False)
+cls = Task.simple_task_type('javac', '${JAVAC} -classpath ${CLASSPATH} -d ${OUTDIR} ${JAVACFLAGS} ${SRC}', shell=False)
+cls.color = 'BLUE'
+def post_run_javac(self):
+ """add the inner class files to the task outputs so they can be cleaned:
+ javac creates separate files for inner classes,
+ but it is not possible to know which inner classes it will produce in advance"""
+
+ par = {}
+ for x in self.inputs:
+ par[x.parent.id] = x.parent
+
+ inner = {}
+ for k in par.values():
+ path = k.abspath(self.env)
+ lst = os.listdir(path)
+
+ for u in lst:
+ if u.find('$') >= 0:
+ inner_class_node = k.find_or_declare(u)
+ inner[inner_class_node.id] = inner_class_node
+
+ to_add = set(inner.keys()) - set([x.id for x in self.outputs])
+ for x in to_add:
+ self.outputs.append(inner[x])
+
+ self.cached = True # mark the task as cached so the results are not stored in the cache - inner classes are a problem
+ return Task.Task.post_run(self)
+cls.post_run = post_run_javac
+
+def detect(conf):
+ # If JAVA_PATH is set, we prepend it to the path list
+ java_path = conf.environ['PATH'].split(os.pathsep)
+ v = conf.env
+
+ if 'JAVA_HOME' in conf.environ:
+ java_path = [os.path.join(conf.environ['JAVA_HOME'], 'bin')] + java_path
+ conf.env['JAVA_HOME'] = [conf.environ['JAVA_HOME']]
+
+ for x in 'javac java jar'.split():
+ conf.find_program(x, var=x.upper(), path_list=java_path)
+ conf.env[x.upper()] = conf.cmd_to_list(conf.env[x.upper()])
+ v['JAVA_EXT'] = ['.java']
+
+ if 'CLASSPATH' in conf.environ:
+ v['CLASSPATH'] = conf.environ['CLASSPATH']
+
+ if not v['JAR']: conf.fatal('jar is required for making java packages')
+ if not v['JAVAC']: conf.fatal('javac is required for compiling java classes')
+ v['JARCREATE'] = 'cf' # can use cvf
+
+@conf
+def check_java_class(self, classname, with_classpath=None):
+ """Check if the specified java class is installed"""
+
+ import shutil
+
+ javatestdir = '.waf-javatest'
+
+ classpath = javatestdir
+ if self.env['CLASSPATH']:
+ classpath += os.pathsep + self.env['CLASSPATH']
+ if isinstance(with_classpath, str):
+ classpath += os.pathsep + with_classpath
+
+ shutil.rmtree(javatestdir, True)
+ os.mkdir(javatestdir)
+
+ java_file = open(os.path.join(javatestdir, 'Test.java'), 'w')
+ java_file.write(class_check_source)
+ java_file.close()
+
+ # Compile the source
+ Utils.exec_command(self.env['JAVAC'] + [os.path.join(javatestdir, 'Test.java')], shell=False)
+
+ # Try to run the app
+ cmd = self.env['JAVA'] + ['-cp', classpath, 'Test', classname]
+ self.log.write("%s\n" % str(cmd))
+ found = Utils.exec_command(cmd, shell=False, log=self.log)
+
+ self.check_message('Java class %s' % classname, "", not found)
+
+ shutil.rmtree(javatestdir, True)
+
+ return found
+
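+# Example (a sketch; Utils.exec_command returns the exit status, so a
+# return value of 0 means the class was found):
+#
+# def configure(conf):
+#     if conf.check_java_class('java.io.DataOutputStream') == 0:
+#         conf.define('HAVE_DATAOUTPUTSTREAM', 1)
+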
+@conf
+def check_jni_headers(conf):
+ """
+ Check for jni headers and libraries
+
+ On success the xxx_JAVA uselib variables are added, for use with uselib='JAVA'
+ """
+
+ if not conf.env.CC_NAME and not conf.env.CXX_NAME:
+ conf.fatal('load a compiler first (gcc, g++, ..)')
+
+ if not conf.env.JAVA_HOME:
+ conf.fatal('set JAVA_HOME in the system environment')
+
+ # jni requires the jvm
+ javaHome = conf.env['JAVA_HOME'][0]
+
+ b = Build.BuildContext()
+ b.load_dirs(conf.srcdir, conf.blddir)
+ dir = b.root.find_dir(conf.env.JAVA_HOME[0] + '/include')
+ f = dir.ant_glob('**/(jni|jni_md).h', flat=False)
+ incDirs = [x.parent.abspath() for x in f]
+
+ dir = b.root.find_dir(conf.env.JAVA_HOME[0])
+ f = dir.ant_glob('**/*jvm.(so|dll)', flat=False)
+ libDirs = [x.parent.abspath() for x in f] or [javaHome]
+
+ for i, d in enumerate(libDirs):
+ if conf.check(header_name='jni.h', define_name='HAVE_JNI_H', lib='jvm',
+ libpath=d, includes=incDirs, uselib_store='JAVA', uselib='JAVA'):
+ break
+ else:
+ conf.fatal('could not find lib jvm in %r (see config.log)' % libDirs)
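+
+# Usage sketch (an assumption): once check_jni_headers() succeeds, a
+# native JNI library can link against the jvm through the stored uselib
+# variables, e.g. in a hypothetical wscript fragment:
+#
+# def build(bld):
+#     bld(features='cc cshlib', source='native.c', target='myjni',
+#         uselib='JAVA')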
diff --git a/third_party/waf/wafadmin/Tools/kde4.py b/third_party/waf/wafadmin/Tools/kde4.py
new file mode 100644
index 0000000..1f3bae7
--- /dev/null
+++ b/third_party/waf/wafadmin/Tools/kde4.py
@@ -0,0 +1,73 @@
+#!/usr/bin/env python
+# encoding: utf-8
+# Thomas Nagy, 2006 (ita)
+
+import os, sys, re
+import Options, TaskGen, Task, Utils
+from TaskGen import taskgen, feature, after
+
+class msgfmt_taskgen(TaskGen.task_gen):
+ def __init__(self, *k, **kw):
+ TaskGen.task_gen.__init__(self, *k, **kw)
+
+@feature('msgfmt')
+def init_msgfmt(self):
+ #langs = '' # for example "foo/fr foo/br"
+ self.default_install_path = '${KDE4_LOCALE_INSTALL_DIR}'
+
+@feature('msgfmt')
+@after('init_msgfmt')
+def apply_msgfmt(self):
+ for lang in self.to_list(self.langs):
+ node = self.path.find_resource(lang+'.po')
+ task = self.create_task('msgfmt', node, node.change_ext('.mo'))
+
+ if not self.bld.is_install: continue
+ langname = lang.split('/')
+ langname = langname[-1]
+ task.install_path = self.install_path + os.sep + langname + os.sep + 'LC_MESSAGES'
+ task.filename = getattr(self, 'appname', 'set_your_appname') + '.mo'
+ task.chmod = self.chmod
+
+def detect(conf):
+ kdeconfig = conf.find_program('kde4-config')
+ if not kdeconfig:
+ conf.fatal('we need kde4-config')
+ prefix = Utils.cmd_output('%s --prefix' % kdeconfig, silent=True).strip()
+ file = '%s/share/apps/cmake/modules/KDELibsDependencies.cmake' % prefix
+ try: os.stat(file)
+ except OSError:
+ file = '%s/share/kde4/apps/cmake/modules/KDELibsDependencies.cmake' % prefix
+ try: os.stat(file)
+ except OSError: conf.fatal('could not open %s' % file)
+
+ try:
+ txt = Utils.readf(file)
+ except (OSError, IOError):
+ conf.fatal('could not read %s' % file)
+
+ txt = txt.replace('\\\n', '\n')
+ fu = re.compile('#(.*)\n')
+ txt = fu.sub('', txt)
+
+ setregexp = re.compile('([sS][eE][tT]\s*\()\s*([^\s]+)\s+\"([^"]+)\"\)')
+ found = setregexp.findall(txt)
+
+ for (_, key, val) in found:
+ #print key, val
+ conf.env[key] = val
+
+ # well well, i could just write an interpreter for cmake files
+ conf.env['LIB_KDECORE']='kdecore'
+ conf.env['LIB_KDEUI'] ='kdeui'
+ conf.env['LIB_KIO'] ='kio'
+ conf.env['LIB_KHTML'] ='khtml'
+ conf.env['LIB_KPARTS'] ='kparts'
+
+ conf.env['LIBPATH_KDECORE'] = conf.env['KDE4_LIB_INSTALL_DIR']
+ conf.env['CPPPATH_KDECORE'] = conf.env['KDE4_INCLUDE_INSTALL_DIR']
+ conf.env.append_value('CPPPATH_KDECORE', conf.env['KDE4_INCLUDE_INSTALL_DIR']+"/KDE")
+
+ conf.env['MSGFMT'] = conf.find_program('msgfmt')
+
+Task.simple_task_type('msgfmt', '${MSGFMT} ${SRC} -o ${TGT}', color='BLUE', shell=False)
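+
+# Usage sketch (an assumption based on the attributes read in
+# apply_msgfmt): per-language translations are compiled to .mo files and
+# installed under LC_MESSAGES, e.g. with a hypothetical fragment:
+#
+# def build(bld):
+#     bld(features='msgfmt', langs='po/fr po/de', appname='myapp')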
diff --git a/third_party/waf/wafadmin/Tools/libtool.py b/third_party/waf/wafadmin/Tools/libtool.py
new file mode 100644
index 0000000..bcc0e2f
--- /dev/null
+++ b/third_party/waf/wafadmin/Tools/libtool.py
@@ -0,0 +1,329 @@
+#!/usr/bin/env python
+# encoding: utf-8
+# Matthias Jahn, 2008, jahn matthias ath freenet punto de
+# Thomas Nagy, 2008 (ita)
+
+import sys, re, os, optparse
+
+import TaskGen, Task, Utils, preproc
+from Logs import error, debug, warn
+from TaskGen import taskgen, after, before, feature
+
+REVISION="0.1.3"
+
+"""
+if you want to use the code here, you must use something like this:
+obj = obj.create(...)
+obj.features.append("libtool")
+obj.vnum = "1.2.3" # optional, but versioned libraries are common
+"""
+
+# fake libtool files
+fakelibtool_vardeps = ['CXX', 'PREFIX']
+def fakelibtool_build(task):
+ # Writes a .la file, used by libtool
+ env = task.env
+ dest = open(task.outputs[0].abspath(env), 'w')
+ sname = task.inputs[0].name
+ fu = dest.write
+ fu("# Generated by ltmain.sh - GNU libtool 1.5.18 - (pwn3d by BKsys II code name WAF)\n")
+ if env['vnum']:
+ nums = env['vnum'].split('.')
+ libname = task.inputs[0].name
+ name3 = libname+'.'+env['vnum']
+ name2 = libname+'.'+nums[0]
+ name1 = libname
+ fu("dlname='%s'\n" % name2)
+ strn = " ".join([name3, name2, name1])
+ fu("library_names='%s'\n" % (strn) )
+ else:
+ fu("dlname='%s'\n" % sname)
+ fu("library_names='%s %s %s'\n" % (sname, sname, sname) )
+ fu("old_library=''\n")
+ vars = ' '.join(env['libtoolvars']+env['LINKFLAGS'])
+ fu("dependency_libs='%s'\n" % vars)
+ fu("current=0\n")
+ fu("age=0\nrevision=0\ninstalled=yes\nshouldnotlink=no\n")
+ fu("dlopen=''\ndlpreopen=''\n")
+ fu("libdir='%s/lib'\n" % env['PREFIX'])
+ dest.close()
+ return 0
+
+def read_la_file(path):
+ sp = re.compile(r'^([^=]+)=\'(.*)\'$')
+ dc={}
+ file = open(path, "r")
+ for line in file.readlines():
+ try:
+ #print sp.split(line.strip())
+ _, left, right, _ = sp.split(line.strip())
+ dc[left]=right
+ except ValueError:
+ pass
+ file.close()
+ return dc
+
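+# A minimal sketch of the input read_la_file() expects (libtool .la files
+# are plain key='value' lines; lines not matching the pattern are skipped):
+#
+# dlname='libfoo.so.1'
+# library_names='libfoo.so.1.2.3 libfoo.so.1 libfoo.so'
+# dependency_libs='-L/usr/lib -lbar'
+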
+@feature("libtool")
+@after('apply_link')
+def apply_link_libtool(self):
+ if self.type != 'program':
+ linktask = self.link_task
+ self.latask = self.create_task('fakelibtool', linktask.outputs, linktask.outputs[0].change_ext('.la'))
+
+ if self.bld.is_install:
+ self.bld.install_files('${PREFIX}/lib', linktask.outputs[0], self.env)
+
+@feature("libtool")
+@before('apply_core')
+def apply_libtool(self):
+ self.env['vnum']=self.vnum
+
+ paths=[]
+ libs=[]
+ libtool_files=[]
+ libtool_vars=[]
+
+ for l in self.env['LINKFLAGS']:
+ if l[:2]=='-L':
+ paths.append(l[2:])
+ elif l[:2]=='-l':
+ libs.append(l[2:])
+
+ for l in libs:
+ for p in paths:
+ dict = read_la_file(p+'/lib'+l+'.la')
+ linkflags2 = dict.get('dependency_libs', '')
+ for v in linkflags2.split():
+ if v.endswith('.la'):
+ libtool_files.append(v)
+ libtool_vars.append(v)
+ continue
+ self.env.append_unique('LINKFLAGS', v)
+ break
+
+ self.env['libtoolvars']=libtool_vars
+
+ while libtool_files:
+ file = libtool_files.pop()
+ dict = read_la_file(file)
+ for v in dict['dependency_libs'].split():
+ if v[-3:] == '.la':
+ libtool_files.append(v)
+ continue
+ self.env.append_unique('LINKFLAGS', v)
+
+Task.task_type_from_func('fakelibtool', vars=fakelibtool_vardeps, func=fakelibtool_build, color='BLUE', after="cc_link cxx_link static_link")
+
+class libtool_la_file:
+ def __init__ (self, la_filename):
+ self.__la_filename = la_filename
+ #remove path and .la suffix
+ self.linkname = str(os.path.split(la_filename)[-1])[:-3]
+ if self.linkname.startswith("lib"):
+ self.linkname = self.linkname[3:]
+ # The name that we can dlopen(3).
+ self.dlname = None
+ # Names of this library
+ self.library_names = None
+ # The name of the static archive.
+ self.old_library = None
+ # Libraries that this one depends upon.
+ self.dependency_libs = None
+ # Version information for libIlmImf.
+ self.current = None
+ self.age = None
+ self.revision = None
+ # Is this an already installed library?
+ self.installed = None
+ # Should we warn about portability when linking against -modules?
+ self.shouldnotlink = None
+ # Files to dlopen/dlpreopen
+ self.dlopen = None
+ self.dlpreopen = None
+ # Directory that this library needs to be installed in:
+ self.libdir = '/usr/lib'
+ if not self.__parse():
+ raise ValueError("file %s not found!" % la_filename)
+
+ def __parse(self):
+ "Retrieve the variables from a file"
+ if not os.path.isfile(self.__la_filename): return 0
+ la_file=open(self.__la_filename, 'r')
+ for line in la_file:
+ ln = line.strip()
+ if not ln: continue
+ if ln[0]=='#': continue
+ (key, value) = str(ln).split('=', 1)
+ key = key.strip()
+ value = value.strip()
+ if value == "no": value = False
+ elif value == "yes": value = True
+ else:
+ try: value = int(value)
+ except ValueError: value = value.strip("'")
+ setattr(self, key, value)
+ la_file.close()
+ return 1
+
+ def get_libs(self):
+ """return linkflags for this lib"""
+ libs = []
+ if self.dependency_libs:
+ libs = str(self.dependency_libs).strip().split()
+ if libs == None:
+ libs = []
+ # add la lib and libdir
+ libs.insert(0, "-l%s" % self.linkname.strip())
+ libs.insert(0, "-L%s" % self.libdir.strip())
+ return libs
+
+ def __str__(self):
+ return '''\
+dlname = "%(dlname)s"
+library_names = "%(library_names)s"
+old_library = "%(old_library)s"
+dependency_libs = "%(dependency_libs)s"
+version = %(current)s.%(age)s.%(revision)s
+installed = "%(installed)s"
+shouldnotlink = "%(shouldnotlink)s"
+dlopen = "%(dlopen)s"
+dlpreopen = "%(dlpreopen)s"
+libdir = "%(libdir)s"''' % self.__dict__
+
+class libtool_config:
+ def __init__ (self, la_filename):
+ self.__libtool_la_file = libtool_la_file(la_filename)
+ tmp = self.__libtool_la_file
+ self.__version = [int(tmp.current), int(tmp.age), int(tmp.revision)]
+ self.__sub_la_files = []
+ self.__sub_la_files.append(la_filename)
+ self.__libs = None
+
+ def __cmp__(self, other):
+ """make it comparable with X.Y.Z versions (Y and Z are optional)"""
+ if not other:
+ return 1
+ othervers = [int(s) for s in str(other).split(".")]
+ selfvers = self.__version
+ return cmp(selfvers, othervers)
+
+ def __str__(self):
+ return "\n".join([
+ str(self.__libtool_la_file),
+ ' '.join(self.__libtool_la_file.get_libs()),
+ '* New getlibs:',
+ ' '.join(self.get_libs())
+ ])
+
+ def __get_la_libs(self, la_filename):
+ return libtool_la_file(la_filename).get_libs()
+
+ def get_libs(self):
+ """return the complete unique linkflags that do not
+ contain .la files anymore"""
+ libs_list = list(self.__libtool_la_file.get_libs())
+ libs_map = {}
+ while len(libs_list) > 0:
+ entry = libs_list.pop(0)
+ if entry:
+ if str(entry).endswith(".la"):
+ ## prevents duplicate .la checks
+ if entry not in self.__sub_la_files:
+ self.__sub_la_files.append(entry)
+ libs_list.extend(self.__get_la_libs(entry))
+ else:
+ libs_map[entry]=1
+ self.__libs = libs_map.keys()
+ return self.__libs
+
+ def get_libs_only_L(self):
+ if not self.__libs: self.get_libs()
+ libs = self.__libs
+ libs = [s for s in libs if str(s).startswith('-L')]
+ return libs
+
+ def get_libs_only_l(self):
+ if not self.__libs: self.get_libs()
+ libs = self.__libs
+ libs = [s for s in libs if str(s).startswith('-l')]
+ return libs
+
+ def get_libs_only_other(self):
+ if not self.__libs: self.get_libs()
+ libs = self.__libs
+ libs = [s for s in libs if not(str(s).startswith('-L')or str(s).startswith('-l'))]
+ return libs
+
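+# Example (a sketch; the path is hypothetical): libtool_config resolves a
+# .la file, follows nested .la references, and supports version tests:
+#
+# ltf = libtool_config('/usr/lib/libIlmImf.la')
+# if ltf >= '2.0.0':
+#     print(' '.join(ltf.get_libs_only_L() + ltf.get_libs_only_l()))
+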
+def useCmdLine():
+ """parse cmdline args and control build"""
+ usage = '''Usage: %prog [options] PathToFile.la
+example: %prog --atleast-version=2.0.0 /usr/lib/libIlmImf.la
+or: %prog --libs /usr/lib/libamarok.la'''
+ parser = optparse.OptionParser(usage)
+ a = parser.add_option
+ a("--version", dest = "versionNumber",
+ action = "store_true", default = False,
+ help = "output version of libtool-config"
+ )
+ a("--debug", dest = "debug",
+ action = "store_true", default = False,
+ help = "enable debug"
+ )
+ a("--libs", dest = "libs",
+ action = "store_true", default = False,
+ help = "output all linker flags"
+ )
+ a("--libs-only-l", dest = "libs_only_l",
+ action = "store_true", default = False,
+ help = "output -l flags"
+ )
+ a("--libs-only-L", dest = "libs_only_L",
+ action = "store_true", default = False,
+ help = "output -L flags"
+ )
+ a("--libs-only-other", dest = "libs_only_other",
+ action = "store_true", default = False,
+ help = "output other libs (e.g. -pthread)"
+ )
+ a("--atleast-version", dest = "atleast_version",
+ default=None,
+ help = "return 0 if the module is at least version ATLEAST_VERSION"
+ )
+ a("--exact-version", dest = "exact_version",
+ default=None,
+ help = "return 0 if the module is exactly version EXACT_VERSION"
+ )
+ a("--max-version", dest = "max_version",
+ default=None,
+ help = "return 0 if the module is no newer than version MAX_VERSION"
+ )
+
+ (options, args) = parser.parse_args()
+ if len(args) != 1 and not options.versionNumber:
+ parser.error("incorrect number of arguments")
+ if options.versionNumber:
+ print("libtool-config version %s" % REVISION)
+ return 0
+ ltf = libtool_config(args[0])
+ if options.debug:
+ print(ltf)
+ if options.atleast_version:
+ if ltf >= options.atleast_version: return 0
+ sys.exit(1)
+ if options.exact_version:
+ if ltf == options.exact_version: return 0
+ sys.exit(1)
+ if options.max_version:
+ if ltf <= options.max_version: return 0
+ sys.exit(1)
+
+ def p(x):
+ print(" ".join(x))
+ if options.libs: p(ltf.get_libs())
+ elif options.libs_only_l: p(ltf.get_libs_only_l())
+ elif options.libs_only_L: p(ltf.get_libs_only_L())
+ elif options.libs_only_other: p(ltf.get_libs_only_other())
+ return 0
+
+if __name__ == '__main__':
+ useCmdLine()
diff --git a/third_party/waf/wafadmin/Tools/lua.py b/third_party/waf/wafadmin/Tools/lua.py
new file mode 100644
index 0000000..8a6c1f4
--- /dev/null
+++ b/third_party/waf/wafadmin/Tools/lua.py
@@ -0,0 +1,24 @@
+#!/usr/bin/env python
+# encoding: utf-8
+# Sebastian Schlingmann, 2008
+# Thomas Nagy, 2008 (ita)
+
+import TaskGen
+from TaskGen import taskgen, feature
+from Constants import *
+
+TaskGen.declare_chain(
+ name = 'luac',
+ rule = '${LUAC} -s -o ${TGT} ${SRC}',
+ ext_in = '.lua',
+ ext_out = '.luac',
+ reentrant = False,
+ install = 'LUADIR', # env variable
+)
+
+@feature('lua')
+def init_lua(self):
+ self.default_chmod = O755
+
+def detect(conf):
+ conf.find_program('luac', var='LUAC', mandatory = True)
diff --git a/third_party/waf/wafadmin/Tools/misc.py b/third_party/waf/wafadmin/Tools/misc.py
new file mode 100644
index 0000000..6ef45ae
--- /dev/null
+++ b/third_party/waf/wafadmin/Tools/misc.py
@@ -0,0 +1,429 @@
+#!/usr/bin/env python
+# encoding: utf-8
+# Thomas Nagy, 2006 (ita)
+
+"""
+Custom objects:
+ - execute a function every time
+ - copy a file somewhere else
+"""
+
+import shutil, re, os
+import TaskGen, Node, Task, Utils, Build, Constants
+from TaskGen import feature, taskgen, after, before
+from Logs import debug
+
+def copy_func(tsk):
+ "Make a file copy. This might be used to perform other kinds of file processing (even calling a compiler is possible)"
+ env = tsk.env
+ infile = tsk.inputs[0].abspath(env)
+ outfile = tsk.outputs[0].abspath(env)
+ try:
+ shutil.copy2(infile, outfile)
+ except (OSError, IOError):
+ return 1
+ else:
+ if tsk.chmod: os.chmod(outfile, tsk.chmod)
+ return 0
+
+def action_process_file_func(tsk):
+ "Ask the function attached to the task to process it"
+ if not tsk.fun: raise Utils.WafError('task must have a function attached to it for copy_func to work!')
+ return tsk.fun(tsk)
+
+class cmd_taskgen(TaskGen.task_gen):
+ def __init__(self, *k, **kw):
+ TaskGen.task_gen.__init__(self, *k, **kw)
+
+@feature('cmd')
+def apply_cmd(self):
+ "call a command every time"
+ if not self.fun: raise Utils.WafError('cmdobj needs a function!')
+ tsk = Task.TaskBase()
+ tsk.fun = self.fun
+ tsk.env = self.env
+ self.tasks.append(tsk)
+ tsk.install_path = self.install_path
+
+class copy_taskgen(TaskGen.task_gen):
+ "By default, make a file copy; if fun is provided, fun will make the copy (or call a compiler, etc)"
+ def __init__(self, *k, **kw):
+ TaskGen.task_gen.__init__(self, *k, **kw)
+
+@feature('copy')
+@before('apply_core')
+def apply_copy(self):
+ Utils.def_attrs(self, fun=copy_func)
+ self.default_install_path = 0
+
+ lst = self.to_list(self.source)
+ self.meths.remove('apply_core')
+
+ for filename in lst:
+ node = self.path.find_resource(filename)
+ if not node: raise Utils.WafError('cannot find input file %s for processing' % filename)
+
+ target = self.target
+ if not target or len(lst)>1: target = node.name
+
+ # TODO the file path may be incorrect
+ newnode = self.path.find_or_declare(target)
+
+ tsk = self.create_task('copy', node, newnode)
+ tsk.fun = self.fun
+ tsk.chmod = self.chmod
+ tsk.install_path = self.install_path
+
+ if not tsk.env:
+ tsk.debug()
+ raise Utils.WafError('task without an environment')
+
+def subst_func(tsk):
+ "Substitutes variables in a .in file"
+
+ m4_re = re.compile('@(\w+)@', re.M)
+
+ env = tsk.env
+ infile = tsk.inputs[0].abspath(env)
+ outfile = tsk.outputs[0].abspath(env)
+
+ code = Utils.readf(infile)
+
+ # replace all % by %% to prevent errors by % signs in the input file while string formatting
+ code = code.replace('%', '%%')
+
+ s = m4_re.sub(r'%(\1)s', code)
+
+ di = tsk.dict or {}
+ if not di:
+ names = m4_re.findall(code)
+ for i in names:
+ di[i] = env.get_flat(i) or env.get_flat(i.upper())
+
+ file = open(outfile, 'w')
+ file.write(s % di)
+ file.close()
+ if tsk.chmod: os.chmod(outfile, tsk.chmod)
+
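+# Substitution sketch (illustrative, not upstream documentation): given a
+# hypothetical config.h.in containing
+#
+#     #define PREFIX "@PREFIX@"
+#
+# a rule such as bld(features='subst', source='config.h.in',
+# target='config.h', dict={'PREFIX': '/usr'}) produces
+# '#define PREFIX "/usr"'; names missing from dict fall back to the
+# environment via env.get_flat().
+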
+class subst_taskgen(TaskGen.task_gen):
+ def __init__(self, *k, **kw):
+ TaskGen.task_gen.__init__(self, *k, **kw)
+
+@feature('subst')
+@before('apply_core')
+def apply_subst(self):
+ Utils.def_attrs(self, fun=subst_func)
+ self.default_install_path = 0
+ lst = self.to_list(self.source)
+ self.meths.remove('apply_core')
+
+ self.dict = getattr(self, 'dict', {})
+
+ for filename in lst:
+ node = self.path.find_resource(filename)
+ if not node: raise Utils.WafError('cannot find input file %s for processing' % filename)
+
+ if self.target:
+ newnode = self.path.find_or_declare(self.target)
+ else:
+ newnode = node.change_ext('')
+
+ try:
+ self.dict = self.dict.get_merged_dict()
+ except AttributeError:
+ pass
+
+ if self.dict and not self.env['DICT_HASH']:
+ self.env = self.env.copy()
+ keys = list(self.dict.keys())
+ keys.sort()
+ lst = [self.dict[x] for x in keys]
+ self.env['DICT_HASH'] = str(Utils.h_list(lst))
+
+ tsk = self.create_task('copy', node, newnode)
+ tsk.fun = self.fun
+ tsk.dict = self.dict
+ tsk.dep_vars = ['DICT_HASH']
+ tsk.install_path = self.install_path
+ tsk.chmod = self.chmod
+
+ if not tsk.env:
+ tsk.debug()
+ raise Utils.WafError('task without an environment')
+
+####################
+## command-output ####
+####################
+
+class cmd_arg(object):
+ """command-output arguments for representing files or folders"""
+ def __init__(self, name, template='%s'):
+ self.name = name
+ self.template = template
+ self.node = None
+
+class input_file(cmd_arg):
+ def find_node(self, base_path):
+ assert isinstance(base_path, Node.Node)
+ self.node = base_path.find_resource(self.name)
+ if self.node is None:
+ raise Utils.WafError("Input file %s not found in %s" % (self.name, base_path))
+
+ def get_path(self, env, absolute):
+ if absolute:
+ return self.template % self.node.abspath(env)
+ else:
+ return self.template % self.node.srcpath(env)
+
+class output_file(cmd_arg):
+ def find_node(self, base_path):
+ assert isinstance(base_path, Node.Node)
+ self.node = base_path.find_or_declare(self.name)
+ if self.node is None:
+ raise Utils.WafError("Output file %s not found in %s" % (self.name, base_path))
+
+ def get_path(self, env, absolute):
+ if absolute:
+ return self.template % self.node.abspath(env)
+ else:
+ return self.template % self.node.bldpath(env)
+
+class cmd_dir_arg(cmd_arg):
+ def find_node(self, base_path):
+ assert isinstance(base_path, Node.Node)
+ self.node = base_path.find_dir(self.name)
+ if self.node is None:
+ raise Utils.WafError("Directory %s not found in %s" % (self.name, base_path))
+
+class input_dir(cmd_dir_arg):
+ def get_path(self, dummy_env, dummy_absolute):
+ return self.template % self.node.abspath()
+
+class output_dir(cmd_dir_arg):
+ def get_path(self, env, dummy_absolute):
+ return self.template % self.node.abspath(env)
+
+
+class command_output(Task.Task):
+ color = "BLUE"
+ def __init__(self, env, command, command_node, command_args, stdin, stdout, cwd, os_env, stderr):
+ Task.Task.__init__(self, env, normal=1)
+ assert isinstance(command, (str, Node.Node))
+ self.command = command
+ self.command_args = command_args
+ self.stdin = stdin
+ self.stdout = stdout
+ self.cwd = cwd
+ self.os_env = os_env
+ self.stderr = stderr
+
+ if command_node is not None: self.dep_nodes = [command_node]
+ self.dep_vars = [] # additional environment variables to track
+
+ def run(self):
+ task = self
+ #assert len(task.inputs) > 0
+
+ def input_path(node, template):
+ if task.cwd is None:
+ return template % node.bldpath(task.env)
+ else:
+ return template % node.abspath()
+ def output_path(node, template):
+ fun = node.abspath
+ if task.cwd is None: fun = node.bldpath
+ return template % fun(task.env)
+
+ if isinstance(task.command, Node.Node):
+ argv = [input_path(task.command, '%s')]
+ else:
+ argv = [task.command]
+
+ for arg in task.command_args:
+ if isinstance(arg, str):
+ argv.append(arg)
+ else:
+ assert isinstance(arg, cmd_arg)
+ argv.append(arg.get_path(task.env, (task.cwd is not None)))
+
+ if task.stdin:
+ stdin = open(input_path(task.stdin, '%s'))
+ else:
+ stdin = None
+
+ if task.stdout:
+ stdout = open(output_path(task.stdout, '%s'), "w")
+ else:
+ stdout = None
+
+ if task.stderr:
+ stderr = open(output_path(task.stderr, '%s'), "w")
+ else:
+ stderr = None
+
+ if task.cwd is None:
+ cwd = ('None (actually %r)' % os.getcwd())
+ else:
+ cwd = repr(task.cwd)
+ debug("command-output: cwd=%s, stdin=%r, stdout=%r, argv=%r" %
+ (cwd, stdin, stdout, argv))
+
+ if task.os_env is None:
+ os_env = os.environ
+ else:
+ os_env = task.os_env
+ command = Utils.pproc.Popen(argv, stdin=stdin, stdout=stdout, stderr=stderr, cwd=task.cwd, env=os_env)
+ return command.wait()
+
+class cmd_output_taskgen(TaskGen.task_gen):
+ def __init__(self, *k, **kw):
+ TaskGen.task_gen.__init__(self, *k, **kw)
+
+@feature('command-output')
+def init_cmd_output(self):
+ Utils.def_attrs(self,
+ stdin = None,
+ stdout = None,
+ stderr = None,
+ # the command to execute
+ command = None,
+
+ # whether it is an external command; otherwise it is assumed
+ # to be an executable binary or script that lives in the
+ # source or build tree.
+ command_is_external = False,
+
+ # extra parameters (argv) to pass to the command (excluding
+ # the command itself)
+ argv = [],
+
+ # dependencies to other objects -> this is probably not what you want (ita)
+ # values must be 'task_gen' instances (not names!)
+ dependencies = [],
+
+ # dependencies on env variable contents
+ dep_vars = [],
+
+ # input files that are implicit, i.e. they are not
+ # stdin, nor are they mentioned explicitly in argv
+ hidden_inputs = [],
+
+ # output files that are implicit, i.e. they are not
+ # stdout, nor are they mentioned explicitly in argv
+ hidden_outputs = [],
+
+ # change the subprocess to this cwd (must use obj.input_dir() or output_dir() here)
+ cwd = None,
+
+ # OS environment variables to pass to the subprocess
+ # if None, use the default environment variables unchanged
+ os_env = None)
+
+@feature('command-output')
+@after('init_cmd_output')
+def apply_cmd_output(self):
+ if self.command is None:
+ raise Utils.WafError("command-output missing command")
+ if self.command_is_external:
+ cmd = self.command
+ cmd_node = None
+ else:
+ cmd_node = self.path.find_resource(self.command)
+ assert cmd_node is not None, ('''Could not find command '%s' in source tree.
+Hint: if this is an external command,
+use command_is_external=True''') % (self.command,)
+ cmd = cmd_node
+
+ if self.cwd is None:
+ cwd = None
+ else:
+ assert isinstance(self.cwd, cmd_dir_arg)
+ self.cwd.find_node(self.path)
+ cwd = self.cwd
+
+ args = []
+ inputs = []
+ outputs = []
+
+ for arg in self.argv:
+ if isinstance(arg, cmd_arg):
+ arg.find_node(self.path)
+ if isinstance(arg, input_file):
+ inputs.append(arg.node)
+ if isinstance(arg, output_file):
+ outputs.append(arg.node)
+
+ if self.stdout is None:
+ stdout = None
+ else:
+ assert isinstance(self.stdout, str)
+ stdout = self.path.find_or_declare(self.stdout)
+ if stdout is None:
+ raise Utils.WafError("File %s not found" % (self.stdout,))
+ outputs.append(stdout)
+
+ if self.stderr is None:
+ stderr = None
+ else:
+ assert isinstance(self.stderr, str)
+ stderr = self.path.find_or_declare(self.stderr)
+ if stderr is None:
+ raise Utils.WafError("File %s not found" % (self.stderr,))
+ outputs.append(stderr)
+
+ if self.stdin is None:
+ stdin = None
+ else:
+ assert isinstance(self.stdin, str)
+ stdin = self.path.find_resource(self.stdin)
+ if stdin is None:
+ raise Utils.WafError("File %s not found" % (self.stdin,))
+ inputs.append(stdin)
+
+ for hidden_input in self.to_list(self.hidden_inputs):
+ node = self.path.find_resource(hidden_input)
+ if node is None:
+ raise Utils.WafError("File %s not found in dir %s" % (hidden_input, self.path))
+ inputs.append(node)
+
+ for hidden_output in self.to_list(self.hidden_outputs):
+ node = self.path.find_or_declare(hidden_output)
+ if node is None:
+ raise Utils.WafError("File %s not found in dir %s" % (hidden_output, self.path))
+ outputs.append(node)
+
+ if not (inputs or getattr(self, 'no_inputs', None)):
+ raise Utils.WafError('command-output objects must have at least one input file or give self.no_inputs')
+ if not (outputs or getattr(self, 'no_outputs', None)):
+ raise Utils.WafError('command-output objects must have at least one output file or give self.no_outputs')
+
+ task = command_output(self.env, cmd, cmd_node, self.argv, stdin, stdout, cwd, self.os_env, stderr)
+ Utils.copy_attrs(self, task, 'before after ext_in ext_out', only_if_set=True)
+ self.tasks.append(task)
+
+ task.inputs = inputs
+ task.outputs = outputs
+ task.dep_vars = self.to_list(self.dep_vars)
+
+ for dep in self.dependencies:
+ assert dep is not self
+ dep.post()
+ for dep_task in dep.tasks:
+ task.set_run_after(dep_task)
+
+ if not task.inputs:
+ # the case for svnversion, always run, and update the output nodes
+ task.runnable_status = type(Task.TaskBase.run)(runnable_status, task, task.__class__) # always run
+ task.post_run = type(Task.TaskBase.run)(post_run, task, task.__class__)
+
+ # TODO the case with no outputs?
+
+def post_run(self):
+ for x in self.outputs:
+ h = Utils.h_file(x.abspath(self.env))
+ self.generator.bld.node_sigs[self.env.variant()][x.id] = h
+
+def runnable_status(self):
+ return Constants.RUN_ME
+
+Task.task_type_from_func('copy', vars=[], func=action_process_file_func)
+TaskGen.task_gen.classes['command-output'] = cmd_output_taskgen
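+
+# Usage sketch for the command-output feature (hypothetical wscript
+# fragment; input_file/output_file are the cmd_arg helpers defined above):
+#
+# def build(bld):
+#     bld(features='command-output',
+#         command='gen.sh', command_is_external=False,
+#         argv=[input_file('data.in'), output_file('data.out')])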
diff --git a/third_party/waf/wafadmin/Tools/msvc.py b/third_party/waf/wafadmin/Tools/msvc.py
new file mode 100644
index 0000000..2a97d19
--- /dev/null
+++ b/third_party/waf/wafadmin/Tools/msvc.py
@@ -0,0 +1,796 @@
+#!/usr/bin/env python
+# encoding: utf-8
+# Carlos Rafael Giani, 2006 (dv)
+# Tamas Pal, 2007 (folti)
+# Nicolas Mercier, 2009
+# Microsoft Visual C++/Intel C++ compiler support - beta, needs more testing
+
+# usage:
+#
+# conf.env['MSVC_VERSIONS'] = ['msvc 9.0', 'msvc 8.0', 'wsdk 7.0', 'intel 11', 'PocketPC 9.0', 'Smartphone 8.0']
+# conf.env['MSVC_TARGETS'] = ['x64']
+# conf.check_tool('msvc')
+# OR conf.check_tool('msvc', funs='no_autodetect')
+# conf.check_lib_msvc('gdi32')
+# conf.check_libs_msvc('kernel32 user32', mandatory=True)
+# ...
+# obj.uselib = 'KERNEL32 USER32 GDI32'
+#
+# platforms and targets will be tested in the order they appear;
+# the first good configuration will be used
+# supported platforms :
+# ia64, x64, x86, x86_amd64, x86_ia64
+
+# compilers supported :
+# msvc => Visual Studio, versions 7.1 (2003), 8.0 (2005), 9.0 (2008)
+# wsdk => Windows SDK, versions 6.0, 6.1, 7.0
+# icl => Intel compiler, versions 9,10,11
+# Smartphone => Compiler/SDK for Smartphone devices (armv4/v4i)
+# PocketPC => Compiler/SDK for PocketPC devices (armv4/v4i)
+
+
+import os, sys, re, string, optparse
+import Utils, TaskGen, Runner, Configure, Task, Options
+from Logs import debug, info, warn, error
+from TaskGen import after, before, feature
+
+from Configure import conftest, conf
+import ccroot, cc, cxx, ar, winres
+from libtool import read_la_file
+
+try:
+ import _winreg
+except:
+ import winreg as _winreg
+
+pproc = Utils.pproc
+
+# importlibs provided by MSVC/Platform SDK. Do NOT search them....
+g_msvc_systemlibs = """
+aclui activeds ad1 adptif adsiid advapi32 asycfilt authz bhsupp bits bufferoverflowu cabinet
+cap certadm certidl ciuuid clusapi comctl32 comdlg32 comsupp comsuppd comsuppw comsuppwd comsvcs
+credui crypt32 cryptnet cryptui d3d8thk daouuid dbgeng dbghelp dciman32 ddao35 ddao35d
+ddao35u ddao35ud delayimp dhcpcsvc dhcpsapi dlcapi dnsapi dsprop dsuiext dtchelp
+faultrep fcachdll fci fdi framedyd framedyn gdi32 gdiplus glaux glu32 gpedit gpmuuid
+gtrts32w gtrtst32 hlink htmlhelp httpapi icm32 icmui imagehlp imm32 iphlpapi iprop
+kernel32 ksguid ksproxy ksuser libcmt libcmtd libcpmt libcpmtd loadperf lz32 mapi
+mapi32 mgmtapi minidump mmc mobsync mpr mprapi mqoa mqrt msacm32 mscms mscoree
+msdasc msimg32 msrating mstask msvcmrt msvcurt msvcurtd mswsock msxml2 mtx mtxdm
+netapi32 nmapi nmsupp npptools ntdsapi ntdsbcli ntmsapi ntquery odbc32 odbcbcp
+odbccp32 oldnames ole32 oleacc oleaut32 oledb oledlg olepro32 opends60 opengl32
+osptk parser pdh penter pgobootrun pgort powrprof psapi ptrustm ptrustmd ptrustu
+ptrustud qosname rasapi32 rasdlg rassapi resutils riched20 rpcndr rpcns4 rpcrt4 rtm
+rtutils runtmchk scarddlg scrnsave scrnsavw secur32 sensapi setupapi sfc shell32
+shfolder shlwapi sisbkup snmpapi sporder srclient sti strsafe svcguid tapi32 thunk32
+traffic unicows url urlmon user32 userenv usp10 uuid uxtheme vcomp vcompd vdmdbg
+version vfw32 wbemuuid webpost wiaguid wininet winmm winscard winspool winstrm
+wintrust wldap32 wmiutils wow32 ws2_32 wsnmp32 wsock32 wst wtsapi32 xaswitch xolehlp
+""".split()
+
+
+all_msvc_platforms = [ ('x64', 'amd64'), ('x86', 'x86'), ('ia64', 'ia64'), ('x86_amd64', 'amd64'), ('x86_ia64', 'ia64') ]
+all_wince_platforms = [ ('armv4', 'arm'), ('armv4i', 'arm'), ('mipsii', 'mips'), ('mipsii_fp', 'mips'), ('mipsiv', 'mips'), ('mipsiv_fp', 'mips'), ('sh4', 'sh'), ('x86', 'cex86') ]
+all_icl_platforms = [ ('intel64', 'amd64'), ('em64t', 'amd64'), ('ia32', 'x86'), ('Itanium', 'ia64')]
+
+def setup_msvc(conf, versions):
+ platforms = Utils.to_list(conf.env['MSVC_TARGETS']) or [i for i,j in all_msvc_platforms+all_icl_platforms+all_wince_platforms]
+ desired_versions = conf.env['MSVC_VERSIONS'] or [v for v,_ in versions][::-1]
+ versiondict = dict(versions)
+
+ for version in desired_versions:
+ try:
+ targets = dict(versiondict [version])
+ for target in platforms:
+ try:
+ arch,(p1,p2,p3) = targets[target]
+ compiler,revision = version.split()
+ return compiler,revision,p1,p2,p3
+ except KeyError: continue
+ except KeyError: continue
+ conf.fatal('msvc: Impossible to find a valid architecture for building (in setup_msvc)')
+
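+# Shape of the data handled above (inferred from the gather_* functions
+# below, shown as an illustrative sketch):
+#
+# versions = [('msvc 9.0', [('x86', ('x86', (bindirs, incdirs, libdirs)))]),
+#             ('wsdk 7.0', [('x64', ('amd64', (bindirs, incdirs, libdirs)))])]
+#
+# setup_msvc() picks the first (version, target) pair allowed by
+# MSVC_VERSIONS/MSVC_TARGETS and returns (compiler, revision, bindirs,
+# incdirs, libdirs).
+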
+@conf
+def get_msvc_version(conf, compiler, version, target, vcvars):
+ debug('msvc: get_msvc_version: %r %r %r', compiler, version, target)
+ batfile = os.path.join(conf.blddir, 'waf-print-msvc.bat')
+ f = open(batfile, 'w')
+ f.write("""@echo off
+set INCLUDE=
+set LIB=
+call "%s" %s
+echo PATH=%%PATH%%
+echo INCLUDE=%%INCLUDE%%
+echo LIB=%%LIB%%
+""" % (vcvars,target))
+ f.close()
+ sout = Utils.cmd_output(['cmd', '/E:on', '/V:on', '/C', batfile])
+ lines = sout.splitlines()
+
+ for x in ('Setting environment', 'Setting SDK environment', 'Intel(R) C++ Compiler'):
+ if lines[0].find(x) != -1:
+ break
+ else:
+ debug('msvc: get_msvc_version: %r %r %r -> not found', compiler, version, target)
+ conf.fatal('msvc: Impossible to find a valid architecture for building (in get_msvc_version)')
+
+ for line in lines[1:]:
+ if line.startswith('PATH='):
+ path = line[5:]
+ MSVC_PATH = path.split(';')
+ elif line.startswith('INCLUDE='):
+ MSVC_INCDIR = [i for i in line[8:].split(';') if i]
+ elif line.startswith('LIB='):
+ MSVC_LIBDIR = [i for i in line[4:].split(';') if i]
+
+ # Check if the compiler is usable at all.
+ # The detection may return 64-bit versions even on 32-bit systems, and these would fail to run.
+ env = {}
+ env.update(os.environ)
+ env.update(PATH = path)
+ compiler_name, linker_name, lib_name = _get_prog_names(conf, compiler)
+ cxx = conf.find_program(compiler_name, path_list=MSVC_PATH)
+ # delete CL if it exists, because it could contain parameters which can change cl's behaviour rather catastrophically.
+ if env.has_key('CL'):
+ del(env['CL'])
+
+ try:
+ p = pproc.Popen([cxx, '/help'], env=env, stdout=pproc.PIPE, stderr=pproc.PIPE)
+ out, err = p.communicate()
+ if p.returncode != 0:
+ raise Exception('return code: %r: %r' % (p.returncode, err))
+ except Exception, e:
+ debug('msvc: get_msvc_version: %r %r %r -> failure', compiler, version, target)
+ debug(str(e))
+ conf.fatal('msvc: cannot run the compiler (in get_msvc_version)')
+ else:
+ debug('msvc: get_msvc_version: %r %r %r -> OK', compiler, version, target)
+
+ return (MSVC_PATH, MSVC_INCDIR, MSVC_LIBDIR)
+
+@conf
+def gather_wsdk_versions(conf, versions):
+ version_pattern = re.compile('^v..?.?\...?.?')
+ try:
+ all_versions = _winreg.OpenKey(_winreg.HKEY_LOCAL_MACHINE, 'SOFTWARE\\Wow6432node\\Microsoft\\Microsoft SDKs\\Windows')
+ except WindowsError:
+ try:
+ all_versions = _winreg.OpenKey(_winreg.HKEY_LOCAL_MACHINE, 'SOFTWARE\\Microsoft\\Microsoft SDKs\\Windows')
+ except WindowsError:
+ return
+ index = 0
+ while 1:
+ try:
+ version = _winreg.EnumKey(all_versions, index)
+ except WindowsError:
+ break
+ index = index + 1
+ if not version_pattern.match(version):
+ continue
+ try:
+ msvc_version = _winreg.OpenKey(all_versions, version)
+ path,type = _winreg.QueryValueEx(msvc_version,'InstallationFolder')
+ except WindowsError:
+ continue
+ if os.path.isfile(os.path.join(path, 'bin', 'SetEnv.cmd')):
+ targets = []
+ for target,arch in all_msvc_platforms:
+ try:
+ targets.append((target, (arch, conf.get_msvc_version('wsdk', version, '/'+target, os.path.join(path, 'bin', 'SetEnv.cmd')))))
+ except Configure.ConfigurationError:
+ pass
+ versions.append(('wsdk ' + version[1:], targets))
+
+@conf
+def gather_msvc_versions(conf, versions):
+ # checks SmartPhones SDKs
+ try:
+ ce_sdk = _winreg.OpenKey(_winreg.HKEY_LOCAL_MACHINE, 'SOFTWARE\\Wow6432node\\Microsoft\\Windows CE Tools\\SDKs')
+ except WindowsError:
+ try:
+ ce_sdk = _winreg.OpenKey(_winreg.HKEY_LOCAL_MACHINE, 'SOFTWARE\\Microsoft\\Windows CE Tools\\SDKs')
+ except WindowsError:
+ ce_sdk = ''
+ if ce_sdk:
+ supported_wince_platforms = []
+ ce_index = 0
+ while 1:
+ try:
+ sdk_device = _winreg.EnumKey(ce_sdk, ce_index)
+ except WindowsError:
+ break
+ ce_index = ce_index + 1
+ sdk = _winreg.OpenKey(ce_sdk, sdk_device)
+ path,type = _winreg.QueryValueEx(sdk, 'SDKRootDir')
+ path=str(path)
+ path,device = os.path.split(path)
+ if not device:
+ path,device = os.path.split(path)
+ for arch,compiler in all_wince_platforms:
+ platforms = []
+ if os.path.isdir(os.path.join(path, device, 'Lib', arch)):
+ platforms.append((arch, compiler, os.path.join(path, device, 'Include', arch), os.path.join(path, device, 'Lib', arch)))
+ if platforms:
+ supported_wince_platforms.append((device, platforms))
+ # checks MSVC
+ version_pattern = re.compile('^..?\...?')
+ for vcver,vcvar in [('VCExpress','exp'), ('VisualStudio','')]:
+ try:
+ all_versions = _winreg.OpenKey(_winreg.HKEY_LOCAL_MACHINE, 'SOFTWARE\\Wow6432node\\Microsoft\\'+vcver)
+ except WindowsError:
+ try:
+ all_versions = _winreg.OpenKey(_winreg.HKEY_LOCAL_MACHINE, 'SOFTWARE\\Microsoft\\'+vcver)
+ except WindowsError:
+ continue
+ index = 0
+ while 1:
+ try:
+ version = _winreg.EnumKey(all_versions, index)
+ except WindowsError:
+ break
+ index = index + 1
+ if not version_pattern.match(version):
+ continue
+ try:
+ msvc_version = _winreg.OpenKey(all_versions, version + "\\Setup\\VS")
+ path,type = _winreg.QueryValueEx(msvc_version, 'ProductDir')
+ path=str(path)
+ targets = []
+ if ce_sdk:
+ for device,platforms in supported_wince_platforms:
+ cetargets = []
+ for platform,compiler,include,lib in platforms:
+ winCEpath = os.path.join(path, 'VC', 'ce')
+ if os.path.isdir(winCEpath):
+ common_bindirs,_1,_2 = conf.get_msvc_version('msvc', version, 'x86', os.path.join(path, 'Common7', 'Tools', 'vsvars32.bat'))
+ if os.path.isdir(os.path.join(winCEpath, 'lib', platform)):
+ bindirs = [os.path.join(winCEpath, 'bin', compiler), os.path.join(winCEpath, 'bin', 'x86_'+compiler)] + common_bindirs
+ incdirs = [include, os.path.join(winCEpath, 'include'), os.path.join(winCEpath, 'atlmfc', 'include')]
+ libdirs = [lib, os.path.join(winCEpath, 'lib', platform), os.path.join(winCEpath, 'atlmfc', 'lib', platform)]
+ cetargets.append((platform, (platform, (bindirs,incdirs,libdirs))))
+ versions.append((device+' '+version, cetargets))
+ if os.path.isfile(os.path.join(path, 'VC', 'vcvarsall.bat')):
+ for target,realtarget in all_msvc_platforms[::-1]:
+ try:
+ targets.append((target, (realtarget, conf.get_msvc_version('msvc', version, target, os.path.join(path, 'VC', 'vcvarsall.bat')))))
+ except:
+ pass
+ elif os.path.isfile(os.path.join(path, 'Common7', 'Tools', 'vsvars32.bat')):
+ try:
+ targets.append(('x86', ('x86', conf.get_msvc_version('msvc', version, 'x86', os.path.join(path, 'Common7', 'Tools', 'vsvars32.bat')))))
+ except Configure.ConfigurationError:
+ pass
+ versions.append(('msvc '+version, targets))
+
+ except WindowsError:
+ continue
+
+@conf
+def gather_icl_versions(conf, versions):
+ version_pattern = re.compile('^...?.?\....?.?')
+ try:
+ all_versions = _winreg.OpenKey(_winreg.HKEY_LOCAL_MACHINE, 'SOFTWARE\\Wow6432node\\Intel\\Compilers\\C++')
+ except WindowsError:
+ try:
+ all_versions = _winreg.OpenKey(_winreg.HKEY_LOCAL_MACHINE, 'SOFTWARE\\Intel\\Compilers\\C++')
+ except WindowsError:
+ return
+ index = 0
+ while 1:
+ try:
+ version = _winreg.EnumKey(all_versions, index)
+ except WindowsError:
+ break
+ index = index + 1
+ if not version_pattern.match(version):
+ continue
+ targets = []
+ for target,arch in all_icl_platforms:
+ try:
+ icl_version = _winreg.OpenKey(all_versions, version+'\\'+target)
+ path,type = _winreg.QueryValueEx(icl_version,'ProductDir')
+ if os.path.isfile(os.path.join(path, 'bin', 'iclvars.bat')):
+ try:
+ targets.append((target, (arch, conf.get_msvc_version('intel', version, target, os.path.join(path, 'bin', 'iclvars.bat')))))
+ except Configure.ConfigurationError:
+ pass
+ except WindowsError:
+ continue
+ major = version[0:2]
+ versions.append(('intel ' + major, targets))
+
+@conf
+def get_msvc_versions(conf):
+ if not conf.env.MSVC_INSTALLED_VERSIONS:
+ lst = []
+ conf.gather_msvc_versions(lst)
+ conf.gather_wsdk_versions(lst)
+ conf.gather_icl_versions(lst)
+ conf.env.MSVC_INSTALLED_VERSIONS = lst
+ return conf.env.MSVC_INSTALLED_VERSIONS
+
+@conf
+def print_all_msvc_detected(conf):
+ for version,targets in conf.env['MSVC_INSTALLED_VERSIONS']:
+ info(version)
+ for target,l in targets:
+ info("\t"+target)
+
+def detect_msvc(conf):
+ versions = get_msvc_versions(conf)
+ return setup_msvc(conf, versions)
+
+@conf
+def find_lt_names_msvc(self, libname, is_static=False):
+ """
+ Win32/MSVC specific code to glean out information from libtool la files.
+ this function is not attached to the task_gen class
+ """
+ lt_names=[
+ 'lib%s.la' % libname,
+ '%s.la' % libname,
+ ]
+
+ for path in self.env['LIBPATH']:
+ for la in lt_names:
+ laf=os.path.join(path,la)
+ dll=None
+ if os.path.exists(laf):
+ ltdict=read_la_file(laf)
+ lt_libdir=None
+ if ltdict.get('libdir', ''):
+ lt_libdir = ltdict['libdir']
+ if not is_static and ltdict.get('library_names', ''):
+ dllnames=ltdict['library_names'].split()
+ dll=dllnames[0].lower()
+ dll=re.sub('\.dll$', '', dll)
+ return (lt_libdir, dll, False)
+ elif ltdict.get('old_library', ''):
+ olib=ltdict['old_library']
+ if os.path.exists(os.path.join(path,olib)):
+ return (path, olib, True)
+ elif lt_libdir != '' and os.path.exists(os.path.join(lt_libdir,olib)):
+ return (lt_libdir, olib, True)
+ else:
+ return (None, olib, True)
+ else:
+ raise Utils.WafError('invalid libtool object file: %s' % laf)
+ return (None, None, None)
+
+@conf
+def libname_msvc(self, libname, is_static=False, mandatory=False):
+ lib = libname.lower()
+ lib = re.sub('\.lib$','',lib)
+
+ if lib in g_msvc_systemlibs:
+ return lib
+
+ lib=re.sub('^lib','',lib)
+
+ if lib == 'm':
+ return None
+
+ (lt_path, lt_libname, lt_static) = self.find_lt_names_msvc(lib, is_static)
+
+ if lt_path != None and lt_libname != None:
+ if lt_static == True:
+ # file existence check has been made by find_lt_names
+ return os.path.join(lt_path,lt_libname)
+
+ if lt_path != None:
+ _libpaths=[lt_path] + self.env['LIBPATH']
+ else:
+ _libpaths=self.env['LIBPATH']
+
+ static_libs=[
+ 'lib%ss.lib' % lib,
+ 'lib%s.lib' % lib,
+ '%ss.lib' % lib,
+ '%s.lib' %lib,
+ ]
+
+ dynamic_libs=[
+ 'lib%s.dll.lib' % lib,
+ 'lib%s.dll.a' % lib,
+ '%s.dll.lib' % lib,
+ '%s.dll.a' % lib,
+ 'lib%s_d.lib' % lib,
+ '%s_d.lib' % lib,
+ '%s.lib' %lib,
+ ]
+
+ libnames=static_libs
+ if not is_static:
+ libnames=dynamic_libs + static_libs
+
+ for path in _libpaths:
+ for libn in libnames:
+ if os.path.exists(os.path.join(path, libn)):
+ debug('msvc: lib found: %s', os.path.join(path,libn))
+ return re.sub('\.lib$', '',libn)
+
+ #if no lib can be found, just return the libname as msvc expects it
+ if mandatory:
+ self.fatal("The library %r could not be found" % libname)
+ return re.sub('\.lib$', '', libname)
+
+@conf
+def check_lib_msvc(self, libname, is_static=False, uselib_store=None, mandatory=False):
+	"This is the API to use"
+ libn = self.libname_msvc(libname, is_static, mandatory)
+
+ if not uselib_store:
+ uselib_store = libname.upper()
+
+ # Note: ideally we should be able to place the lib in the right env var, either STATICLIB or LIB,
+ # but we don't distinguish static libs from shared libs.
+ # This is ok since msvc doesn't have any special linker flag to select static libs (no env['STATICLIB_MARKER'])
+ if False and is_static: # disabled
+ self.env['STATICLIB_' + uselib_store] = [libn]
+ else:
+ self.env['LIB_' + uselib_store] = [libn]
+
+@conf
+def check_libs_msvc(self, libnames, is_static=False, mandatory=False):
+ for libname in Utils.to_list(libnames):
+ self.check_lib_msvc(libname, is_static, mandatory=mandatory)
+
+@conftest
+def no_autodetect(conf):
+ conf.eval_rules(detect.replace('autodetect', ''))
+
+
+detect = '''
+autodetect
+find_msvc
+msvc_common_flags
+cc_load_tools
+cxx_load_tools
+cc_add_flags
+cxx_add_flags
+link_add_flags
+'''
+
+@conftest
+def autodetect(conf):
+ v = conf.env
+ compiler, version, path, includes, libdirs = detect_msvc(conf)
+ v['PATH'] = path
+ v['CPPPATH'] = includes
+ v['LIBPATH'] = libdirs
+ v['MSVC_COMPILER'] = compiler
+
+def _get_prog_names(conf, compiler):
+ if compiler=='intel':
+ compiler_name = 'ICL'
+ linker_name = 'XILINK'
+ lib_name = 'XILIB'
+ else:
+ # assumes CL.exe
+ compiler_name = 'CL'
+ linker_name = 'LINK'
+ lib_name = 'LIB'
+ return compiler_name, linker_name, lib_name
+
+@conftest
+def find_msvc(conf):
+ # due to path format limitations, limit operation only to native Win32. Yeah it sucks.
+ if sys.platform != 'win32':
+ conf.fatal('MSVC module only works under native Win32 Python! cygwin is not supported yet')
+
+ v = conf.env
+
+ compiler, version, path, includes, libdirs = detect_msvc(conf)
+
+ compiler_name, linker_name, lib_name = _get_prog_names(conf, compiler)
+ has_msvc_manifest = (compiler == 'msvc' and float(version) >= 8) or (compiler == 'wsdk' and float(version) >= 6) or (compiler == 'intel' and float(version) >= 11)
+
+ # compiler
+ cxx = None
+ if v.CXX: cxx = v.CXX
+ elif 'CXX' in conf.environ: cxx = conf.environ['CXX']
+ if not cxx: cxx = conf.find_program(compiler_name, var='CXX', path_list=path, mandatory=True)
+ cxx = conf.cmd_to_list(cxx)
+
+ # before setting anything, check if the compiler is really msvc
+ env = dict(conf.environ)
+ env.update(PATH = ';'.join(path))
+ if not Utils.cmd_output([cxx, '/nologo', '/?'], silent=True, env=env):
+ conf.fatal('the msvc compiler could not be identified')
+
+ link = v.LINK_CXX
+ if not link:
+ link = conf.find_program(linker_name, path_list=path, mandatory=True)
+ ar = v.AR
+ if not ar:
+ ar = conf.find_program(lib_name, path_list=path, mandatory=True)
+
+	# manifest tool. Not required for VS 2003 and below; required for VS 2005 and later
+ mt = v.MT
+ if has_msvc_manifest:
+ mt = conf.find_program('MT', path_list=path, mandatory=True)
+
+ # no more possibility of failure means the data state will be consistent
+ # we may store the data safely now
+
+ v.MSVC_MANIFEST = has_msvc_manifest
+ v.PATH = path
+ v.CPPPATH = includes
+ v.LIBPATH = libdirs
+
+ # c/c++ compiler
+ v.CC = v.CXX = cxx
+ v.CC_NAME = v.CXX_NAME = 'msvc'
+
+ v.LINK = v.LINK_CXX = link
+ if not v.LINK_CC:
+ v.LINK_CC = v.LINK_CXX
+
+ v.AR = ar
+ v.MT = mt
+ v.MTFLAGS = v.ARFLAGS = ['/NOLOGO']
+
+
+ conf.check_tool('winres')
+
+ if not conf.env.WINRC:
+		warn('Resource compiler not found. Compiling resource files is disabled')
+
+ # environment flags
+ try: v.prepend_value('CPPPATH', conf.environ['INCLUDE'])
+ except KeyError: pass
+ try: v.prepend_value('LIBPATH', conf.environ['LIB'])
+ except KeyError: pass
+
+@conftest
+def msvc_common_flags(conf):
+ v = conf.env
+
+ v['CPPFLAGS'] = ['/W3', '/nologo']
+
+ v['CCDEFINES_ST'] = '/D%s'
+ v['CXXDEFINES_ST'] = '/D%s'
+
+	# TODO just use _WIN32, which is defined by the compiler itself!
+	v['CCDEFINES'] = ['WIN32'] # avoid using this, any compiler predefines the _WIN32 macro anyway
+	v['CXXDEFINES'] = ['WIN32'] # avoid using this, any compiler predefines the _WIN32 macro anyway
+
+ v['_CCINCFLAGS'] = []
+ v['_CCDEFFLAGS'] = []
+ v['_CXXINCFLAGS'] = []
+ v['_CXXDEFFLAGS'] = []
+
+ v['CC_SRC_F'] = ''
+ v['CC_TGT_F'] = ['/c', '/Fo']
+ v['CXX_SRC_F'] = ''
+ v['CXX_TGT_F'] = ['/c', '/Fo']
+
+ v['CPPPATH_ST'] = '/I%s' # template for adding include paths
+
+ v['AR_TGT_F'] = v['CCLNK_TGT_F'] = v['CXXLNK_TGT_F'] = '/OUT:'
+
+ # Subsystem specific flags
+ v['CPPFLAGS_CONSOLE'] = ['/SUBSYSTEM:CONSOLE']
+ v['CPPFLAGS_NATIVE'] = ['/SUBSYSTEM:NATIVE']
+ v['CPPFLAGS_POSIX'] = ['/SUBSYSTEM:POSIX']
+ v['CPPFLAGS_WINDOWS'] = ['/SUBSYSTEM:WINDOWS']
+ v['CPPFLAGS_WINDOWSCE'] = ['/SUBSYSTEM:WINDOWSCE']
+
+ # CRT specific flags
+ v['CPPFLAGS_CRT_MULTITHREADED'] = ['/MT']
+ v['CPPFLAGS_CRT_MULTITHREADED_DLL'] = ['/MD']
+
+ # TODO these are defined by the compiler itself!
+ v['CPPDEFINES_CRT_MULTITHREADED'] = ['_MT'] # this is defined by the compiler itself!
+ v['CPPDEFINES_CRT_MULTITHREADED_DLL'] = ['_MT', '_DLL'] # these are defined by the compiler itself!
+
+ v['CPPFLAGS_CRT_MULTITHREADED_DBG'] = ['/MTd']
+ v['CPPFLAGS_CRT_MULTITHREADED_DLL_DBG'] = ['/MDd']
+
+ # TODO these are defined by the compiler itself!
+ v['CPPDEFINES_CRT_MULTITHREADED_DBG'] = ['_DEBUG', '_MT'] # these are defined by the compiler itself!
+ v['CPPDEFINES_CRT_MULTITHREADED_DLL_DBG'] = ['_DEBUG', '_MT', '_DLL'] # these are defined by the compiler itself!
+
+ # compiler debug levels
+ v['CCFLAGS'] = ['/TC']
+ v['CCFLAGS_OPTIMIZED'] = ['/O2', '/DNDEBUG']
+ v['CCFLAGS_RELEASE'] = ['/O2', '/DNDEBUG']
+ v['CCFLAGS_DEBUG'] = ['/Od', '/RTC1', '/ZI']
+ v['CCFLAGS_ULTRADEBUG'] = ['/Od', '/RTC1', '/ZI']
+
+ v['CXXFLAGS'] = ['/TP', '/EHsc']
+ v['CXXFLAGS_OPTIMIZED'] = ['/O2', '/DNDEBUG']
+ v['CXXFLAGS_RELEASE'] = ['/O2', '/DNDEBUG']
+
+ v['CXXFLAGS_DEBUG'] = ['/Od', '/RTC1', '/ZI']
+ v['CXXFLAGS_ULTRADEBUG'] = ['/Od', '/RTC1', '/ZI']
+
+ # linker
+ v['LIB'] = []
+
+ v['LIB_ST'] = '%s.lib' # template for adding libs
+ v['LIBPATH_ST'] = '/LIBPATH:%s' # template for adding libpaths
+	v['STATICLIB_ST']        = 'lib%s.lib' # Note: to be able to distinguish between a static lib and a dll import lib, it's a good practice to name the static lib 'lib%s.lib' and the dll import lib '%s.lib'
+ v['STATICLIBPATH_ST'] = '/LIBPATH:%s'
+
+ v['LINKFLAGS'] = ['/NOLOGO']
+ if v['MSVC_MANIFEST']:
+ v.append_value('LINKFLAGS', '/MANIFEST')
+ v['LINKFLAGS_DEBUG'] = ['/DEBUG']
+ v['LINKFLAGS_ULTRADEBUG'] = ['/DEBUG']
+
+ # shared library
+ v['shlib_CCFLAGS'] = ['']
+ v['shlib_CXXFLAGS'] = ['']
+ v['shlib_LINKFLAGS']= ['/DLL']
+ v['shlib_PATTERN'] = '%s.dll'
+ v['implib_PATTERN'] = '%s.lib'
+ v['IMPLIB_ST'] = '/IMPLIB:%s'
+
+ # static library
+ v['staticlib_LINKFLAGS'] = ['']
+	v['staticlib_PATTERN']   = 'lib%s.lib' # Note: to be able to distinguish between a static lib and a dll import lib, it's a good practice to name the static lib 'lib%s.lib' and the dll import lib '%s.lib'
+
+ # program
+ v['program_PATTERN'] = '%s.exe'
+
+
+#######################################################################################################
+##### conf above, build below
+
+@after('apply_link')
+@feature('cc', 'cxx')
+def apply_flags_msvc(self):
+ if self.env.CC_NAME != 'msvc' or not self.link_task:
+ return
+
+ subsystem = getattr(self, 'subsystem', '')
+ if subsystem:
+ subsystem = '/subsystem:%s' % subsystem
+ flags = 'cstaticlib' in self.features and 'ARFLAGS' or 'LINKFLAGS'
+ self.env.append_value(flags, subsystem)
+
+ if getattr(self, 'link_task', None) and not 'cstaticlib' in self.features:
+ for f in self.env.LINKFLAGS:
+ d = f.lower()
+ if d[1:] == 'debug':
+ pdbnode = self.link_task.outputs[0].change_ext('.pdb')
+ pdbfile = pdbnode.bldpath(self.env)
+ self.link_task.outputs.append(pdbnode)
+ self.bld.install_files(self.install_path, [pdbnode], env=self.env)
+ break
+
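+# e.g. (editor's illustration) setting obj.subsystem = 'console' on a task
+# generator makes apply_flags_msvc above append '/subsystem:console' to
+# LINKFLAGS (or to ARFLAGS for static libs)
+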
+@feature('cprogram', 'cshlib', 'cstaticlib')
+@after('apply_lib_vars')
+@before('apply_obj_vars')
+def apply_obj_vars_msvc(self):
+ if self.env['CC_NAME'] != 'msvc':
+ return
+
+ try:
+ self.meths.remove('apply_obj_vars')
+ except ValueError:
+ pass
+
+ libpaths = getattr(self, 'libpaths', [])
+ if not libpaths: self.libpaths = libpaths
+
+ env = self.env
+ app = env.append_unique
+
+ cpppath_st = env['CPPPATH_ST']
+ lib_st = env['LIB_ST']
+ staticlib_st = env['STATICLIB_ST']
+ libpath_st = env['LIBPATH_ST']
+ staticlibpath_st = env['STATICLIBPATH_ST']
+
+ for i in env['LIBPATH']:
+ app('LINKFLAGS', libpath_st % i)
+ if not libpaths.count(i):
+ libpaths.append(i)
+
+ for i in env['LIBPATH']:
+ app('LINKFLAGS', staticlibpath_st % i)
+ if not libpaths.count(i):
+ libpaths.append(i)
+
+ # i doubt that anyone will make a fully static binary anyway
+ if not env['FULLSTATIC']:
+ if env['STATICLIB'] or env['LIB']:
+ app('LINKFLAGS', env['SHLIB_MARKER']) # TODO does SHLIB_MARKER work?
+
+ for i in env['STATICLIB']:
+ app('LINKFLAGS', staticlib_st % i)
+
+ for i in env['LIB']:
+ app('LINKFLAGS', lib_st % i)
+
+# split the manifest file processing from the link task, like for the rc processing
+
+@feature('cprogram', 'cshlib')
+@after('apply_link')
+def apply_manifest(self):
+	"""Special linker for MSVC with support for embedding manifests into DLLs
+	and executables compiled by Visual Studio 2005 or later. Without
+	the manifest file, the binaries are unusable.
+	See: http://msdn2.microsoft.com/en-us/library/ms235542(VS.80).aspx"""
+
+ if self.env.CC_NAME == 'msvc' and self.env.MSVC_MANIFEST:
+ out_node = self.link_task.outputs[0]
+ man_node = out_node.parent.find_or_declare(out_node.name + '.manifest')
+ self.link_task.outputs.append(man_node)
+ self.link_task.do_manifest = True
+
+def exec_mf(self):
+ env = self.env
+ mtool = env['MT']
+ if not mtool:
+ return 0
+
+ self.do_manifest = False
+
+ outfile = self.outputs[0].bldpath(env)
+
+ manifest = None
+ for out_node in self.outputs:
+ if out_node.name.endswith('.manifest'):
+ manifest = out_node.bldpath(env)
+ break
+ if manifest is None:
+ # Should never get here. If we do, it means the manifest file was
+ # never added to the outputs list, thus we don't have a manifest file
+ # to embed, so we just return.
+ return 0
+
+ # embedding mode. Different for EXE's and DLL's.
+ # see: http://msdn2.microsoft.com/en-us/library/ms235591(VS.80).aspx
+ mode = ''
+ if 'cprogram' in self.generator.features:
+ mode = '1'
+ elif 'cshlib' in self.generator.features:
+ mode = '2'
+
+ debug('msvc: embedding manifest')
+ #flags = ' '.join(env['MTFLAGS'] or [])
+
+ lst = []
+ lst.extend([env['MT']])
+ lst.extend(Utils.to_list(env['MTFLAGS']))
+ lst.extend(Utils.to_list("-manifest"))
+ lst.extend(Utils.to_list(manifest))
+ lst.extend(Utils.to_list("-outputresource:%s;%s" % (outfile, mode)))
+
+ #cmd='%s %s -manifest "%s" -outputresource:"%s";#%s' % (mtool, flags,
+ # manifest, outfile, mode)
+ lst = [lst]
+ return self.exec_command(*lst)
+
+########## stupid evil command modification: concatenate the tokens /Fx, /doc, and /x: with the next token
+
+def exec_command_msvc(self, *k, **kw):
+	"instead of quoting all the paths and keeping the shell, we can simply join the options msvc is interested in"
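+	# illustration (editor's note, not from upstream): a command list such as
+	#   ['CL', '/Fo', 'build/foo.obj', '/c', 'foo.c']
+	# becomes
+	#   ['CL', '/Fobuild/foo.obj', '/c', 'foo.c']
+	# because cl.exe expects /Fx (and /doc, /x:) fused with the following token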
+ if self.env['CC_NAME'] == 'msvc':
+ if isinstance(k[0], list):
+ lst = []
+ carry = ''
+ for a in k[0]:
+ if len(a) == 3 and a.startswith('/F') or a == '/doc' or a[-1] == ':':
+ carry = a
+ else:
+ lst.append(carry + a)
+ carry = ''
+ k = [lst]
+
+ env = dict(os.environ)
+ env.update(PATH = ';'.join(self.env['PATH']))
+ kw['env'] = env
+
+ ret = self.generator.bld.exec_command(*k, **kw)
+ if ret: return ret
+ if getattr(self, 'do_manifest', None):
+ ret = exec_mf(self)
+ return ret
+
+for k in 'cc cxx winrc cc_link cxx_link static_link qxx'.split():
+ cls = Task.TaskBase.classes.get(k, None)
+ if cls:
+ cls.exec_command = exec_command_msvc
diff --git a/third_party/waf/wafadmin/Tools/nasm.py b/third_party/waf/wafadmin/Tools/nasm.py
new file mode 100644
index 0000000..43b73a7
--- /dev/null
+++ b/third_party/waf/wafadmin/Tools/nasm.py
@@ -0,0 +1,48 @@
+#!/usr/bin/env python
+# encoding: utf-8
+# Thomas Nagy, 2008
+
+"""
+Nasm processing
+"""
+
+import os
+import TaskGen, Task, Utils
+from TaskGen import taskgen, before, extension
+
+nasm_str = '${NASM} ${NASM_FLAGS} ${NASM_INCLUDES} ${SRC} -o ${TGT}'
+
+EXT_NASM = ['.s', '.S', '.asm', '.ASM', '.spp', '.SPP']
+
+@before('apply_link')
+def apply_nasm_vars(self):
+
+ # flags
+ if hasattr(self, 'nasm_flags'):
+ for flag in self.to_list(self.nasm_flags):
+ self.env.append_value('NASM_FLAGS', flag)
+
+	# includes - assuming they can be handled like c include paths
+ if hasattr(self, 'includes'):
+ for inc in self.to_list(self.includes):
+ node = self.path.find_dir(inc)
+ if not node:
+				raise Utils.WafError('cannot find the dir ' + inc)
+ self.env.append_value('NASM_INCLUDES', '-I%s' % node.srcpath(self.env))
+ self.env.append_value('NASM_INCLUDES', '-I%s' % node.bldpath(self.env))
+
+@extension(EXT_NASM)
+def nasm_file(self, node):
+ try: obj_ext = self.obj_ext
+ except AttributeError: obj_ext = '_%d.o' % self.idx
+
+ task = self.create_task('nasm', node, node.change_ext(obj_ext))
+ self.compiled_tasks.append(task)
+
+ self.meths.append('apply_nasm_vars')
+
+# create our action here
+Task.simple_task_type('nasm', nasm_str, color='BLUE', ext_out='.o', shell=False)
+
+def detect(conf):
+ nasm = conf.find_program(['nasm', 'yasm'], var='NASM', mandatory=True)
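+
+# Example (editor's sketch, not part of upstream waf): a wscript fragment
+# assembling nasm sources next to C code; target name and flags are
+# illustrative:
+#
+#   def build(bld):
+#       bld.new_task_gen(features='cc cprogram',
+#                        source='main.c lowlevel.asm',
+#                        target='demo',
+#                        nasm_flags='-f elf')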
diff --git a/third_party/waf/wafadmin/Tools/ocaml.py b/third_party/waf/wafadmin/Tools/ocaml.py
new file mode 100644
index 0000000..a0667a4
--- /dev/null
+++ b/third_party/waf/wafadmin/Tools/ocaml.py
@@ -0,0 +1,297 @@
+#!/usr/bin/env python
+# encoding: utf-8
+# Thomas Nagy, 2006 (ita)
+
+"ocaml support"
+
+import os, re
+import TaskGen, Utils, Task, Build
+from Logs import error
+from TaskGen import taskgen, feature, before, after, extension
+
+EXT_MLL = ['.mll']
+EXT_MLY = ['.mly']
+EXT_MLI = ['.mli']
+EXT_MLC = ['.c']
+EXT_ML = ['.ml']
+
+open_re = re.compile('^\s*open\s+([a-zA-Z]+)(;;){0,1}$', re.M)
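+# e.g. a line such as "open List;;" (or "open List") yields the module name "List"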
+foo = re.compile(r"""(\(\*)|(\*\))|("(\\.|[^"\\])*"|'(\\.|[^'\\])*'|.[^()*"'\\]*)""", re.M)
+def filter_comments(txt):
+ meh = [0]
+ def repl(m):
+ if m.group(1): meh[0] += 1
+ elif m.group(2): meh[0] -= 1
+ elif not meh[0]: return m.group(0)
+ return ''
+ return foo.sub(repl, txt)
+
+def scan(self):
+ node = self.inputs[0]
+ code = filter_comments(node.read(self.env))
+
+ global open_re
+ names = []
+ import_iterator = open_re.finditer(code)
+ if import_iterator:
+ for import_match in import_iterator:
+ names.append(import_match.group(1))
+ found_lst = []
+ raw_lst = []
+ for name in names:
+ nd = None
+ for x in self.incpaths:
+ nd = x.find_resource(name.lower()+'.ml')
+ if not nd: nd = x.find_resource(name+'.ml')
+ if nd:
+ found_lst.append(nd)
+ break
+ else:
+ raw_lst.append(name)
+
+ return (found_lst, raw_lst)
+
+native_lst=['native', 'all', 'c_object']
+bytecode_lst=['bytecode', 'all']
+class ocaml_taskgen(TaskGen.task_gen):
+ def __init__(self, *k, **kw):
+ TaskGen.task_gen.__init__(self, *k, **kw)
+
+@feature('ocaml')
+def init_ml(self):
+ Utils.def_attrs(self,
+ type = 'all',
+ incpaths_lst = [],
+ bld_incpaths_lst = [],
+ mlltasks = [],
+ mlytasks = [],
+ mlitasks = [],
+ native_tasks = [],
+ bytecode_tasks = [],
+ linktasks = [],
+ bytecode_env = None,
+ native_env = None,
+ compiled_tasks = [],
+ includes = '',
+ uselib = '',
+ are_deps_set = 0)
+
+@feature('ocaml')
+@after('init_ml')
+def init_envs_ml(self):
+
+ self.islibrary = getattr(self, 'islibrary', False)
+
+ global native_lst, bytecode_lst
+ self.native_env = None
+ if self.type in native_lst:
+ self.native_env = self.env.copy()
+ if self.islibrary: self.native_env['OCALINKFLAGS'] = '-a'
+
+ self.bytecode_env = None
+ if self.type in bytecode_lst:
+ self.bytecode_env = self.env.copy()
+ if self.islibrary: self.bytecode_env['OCALINKFLAGS'] = '-a'
+
+ if self.type == 'c_object':
+ self.native_env.append_unique('OCALINKFLAGS_OPT', '-output-obj')
+
+@feature('ocaml')
+@before('apply_vars_ml')
+@after('init_envs_ml')
+def apply_incpaths_ml(self):
+ inc_lst = self.includes.split()
+ lst = self.incpaths_lst
+ for dir in inc_lst:
+ node = self.path.find_dir(dir)
+ if not node:
+ error("node not found: " + str(dir))
+ continue
+ self.bld.rescan(node)
+ if not node in lst: lst.append(node)
+ self.bld_incpaths_lst.append(node)
+ # now the nodes are added to self.incpaths_lst
+
+@feature('ocaml')
+@before('apply_core')
+def apply_vars_ml(self):
+ for i in self.incpaths_lst:
+ if self.bytecode_env:
+ app = self.bytecode_env.append_value
+ app('OCAMLPATH', '-I')
+ app('OCAMLPATH', i.srcpath(self.env))
+ app('OCAMLPATH', '-I')
+ app('OCAMLPATH', i.bldpath(self.env))
+
+ if self.native_env:
+ app = self.native_env.append_value
+ app('OCAMLPATH', '-I')
+ app('OCAMLPATH', i.bldpath(self.env))
+ app('OCAMLPATH', '-I')
+ app('OCAMLPATH', i.srcpath(self.env))
+
+ varnames = ['INCLUDES', 'OCAMLFLAGS', 'OCALINKFLAGS', 'OCALINKFLAGS_OPT']
+ for name in self.uselib.split():
+ for vname in varnames:
+ cnt = self.env[vname+'_'+name]
+ if cnt:
+ if self.bytecode_env: self.bytecode_env.append_value(vname, cnt)
+ if self.native_env: self.native_env.append_value(vname, cnt)
+
+@feature('ocaml')
+@after('apply_core')
+def apply_link_ml(self):
+
+ if self.bytecode_env:
+ ext = self.islibrary and '.cma' or '.run'
+
+ linktask = self.create_task('ocalink')
+ linktask.bytecode = 1
+ linktask.set_outputs(self.path.find_or_declare(self.target + ext))
+ linktask.obj = self
+ linktask.env = self.bytecode_env
+ self.linktasks.append(linktask)
+
+ if self.native_env:
+ if self.type == 'c_object': ext = '.o'
+ elif self.islibrary: ext = '.cmxa'
+ else: ext = ''
+
+ linktask = self.create_task('ocalinkx')
+ linktask.set_outputs(self.path.find_or_declare(self.target + ext))
+ linktask.obj = self
+ linktask.env = self.native_env
+ self.linktasks.append(linktask)
+
+ # we produce a .o file to be used by gcc
+ self.compiled_tasks.append(linktask)
+
+@extension(EXT_MLL)
+def mll_hook(self, node):
+ mll_task = self.create_task('ocamllex', node, node.change_ext('.ml'), env=self.native_env)
+ self.mlltasks.append(mll_task)
+
+ self.allnodes.append(mll_task.outputs[0])
+
+@extension(EXT_MLY)
+def mly_hook(self, node):
+ mly_task = self.create_task('ocamlyacc', node, [node.change_ext('.ml'), node.change_ext('.mli')], env=self.native_env)
+ self.mlytasks.append(mly_task)
+ self.allnodes.append(mly_task.outputs[0])
+
+ task = self.create_task('ocamlcmi', mly_task.outputs[1], mly_task.outputs[1].change_ext('.cmi'), env=self.native_env)
+
+@extension(EXT_MLI)
+def mli_hook(self, node):
+ task = self.create_task('ocamlcmi', node, node.change_ext('.cmi'), env=self.native_env)
+ self.mlitasks.append(task)
+
+@extension(EXT_MLC)
+def mlc_hook(self, node):
+ task = self.create_task('ocamlcc', node, node.change_ext('.o'), env=self.native_env)
+ self.compiled_tasks.append(task)
+
+@extension(EXT_ML)
+def ml_hook(self, node):
+ if self.native_env:
+ task = self.create_task('ocamlx', node, node.change_ext('.cmx'), env=self.native_env)
+ task.obj = self
+ task.incpaths = self.bld_incpaths_lst
+ self.native_tasks.append(task)
+
+ if self.bytecode_env:
+ task = self.create_task('ocaml', node, node.change_ext('.cmo'), env=self.bytecode_env)
+ task.obj = self
+ task.bytecode = 1
+ task.incpaths = self.bld_incpaths_lst
+ self.bytecode_tasks.append(task)
+
+def compile_may_start(self):
+ if not getattr(self, 'flag_deps', ''):
+ self.flag_deps = 1
+
+ # the evil part is that we can only compute the dependencies after the
+ # source files can be read (this means actually producing the source files)
+ if getattr(self, 'bytecode', ''): alltasks = self.obj.bytecode_tasks
+ else: alltasks = self.obj.native_tasks
+
+ self.signature() # ensure that files are scanned - unfortunately
+ tree = self.generator.bld
+ env = self.env
+ for node in self.inputs:
+ lst = tree.node_deps[self.unique_id()]
+ for depnode in lst:
+ for t in alltasks:
+ if t == self: continue
+ if depnode in t.inputs:
+ self.set_run_after(t)
+
+ # TODO necessary to get the signature right - for now
+ delattr(self, 'cache_sig')
+ self.signature()
+
+ return Task.Task.runnable_status(self)
+
+b = Task.simple_task_type
+cls = b('ocamlx', '${OCAMLOPT} ${OCAMLPATH} ${OCAMLFLAGS} ${INCLUDES} -c -o ${TGT} ${SRC}', color='GREEN', shell=False)
+cls.runnable_status = compile_may_start
+cls.scan = scan
+
+b = Task.simple_task_type
+cls = b('ocaml', '${OCAMLC} ${OCAMLPATH} ${OCAMLFLAGS} ${INCLUDES} -c -o ${TGT} ${SRC}', color='GREEN', shell=False)
+cls.runnable_status = compile_may_start
+cls.scan = scan
+
+
+b('ocamlcmi', '${OCAMLC} ${OCAMLPATH} ${INCLUDES} -o ${TGT} -c ${SRC}', color='BLUE', before="ocaml ocamlcc ocamlx")
+b('ocamlcc', 'cd ${TGT[0].bld_dir(env)} && ${OCAMLOPT} ${OCAMLFLAGS} ${OCAMLPATH} ${INCLUDES} -c ${SRC[0].abspath(env)}', color='GREEN')
+
+b('ocamllex', '${OCAMLLEX} ${SRC} -o ${TGT}', color='BLUE', before="ocamlcmi ocaml ocamlcc")
+b('ocamlyacc', '${OCAMLYACC} -b ${TGT[0].bld_base(env)} ${SRC}', color='BLUE', before="ocamlcmi ocaml ocamlcc")
+
+
+def link_may_start(self):
+ if not getattr(self, 'order', ''):
+
+ # now reorder the inputs given the task dependencies
+ if getattr(self, 'bytecode', 0): alltasks = self.obj.bytecode_tasks
+ else: alltasks = self.obj.native_tasks
+
+ # this part is difficult, we do not have a total order on the tasks
+ # if the dependencies are wrong, this may not stop
+ seen = []
+ pendant = []+alltasks
+ while pendant:
+ task = pendant.pop(0)
+ if task in seen: continue
+ for x in task.run_after:
+ if not x in seen:
+ pendant.append(task)
+ break
+ else:
+ seen.append(task)
+ self.inputs = [x.outputs[0] for x in seen]
+ self.order = 1
+ return Task.Task.runnable_status(self)
+
+act = b('ocalink', '${OCAMLC} -o ${TGT} ${INCLUDES} ${OCALINKFLAGS} ${SRC}', color='YELLOW', after="ocaml ocamlcc")
+act.runnable_status = link_may_start
+act = b('ocalinkx', '${OCAMLOPT} -o ${TGT} ${INCLUDES} ${OCALINKFLAGS_OPT} ${SRC}', color='YELLOW', after="ocamlx ocamlcc")
+act.runnable_status = link_may_start
+
+def detect(conf):
+ opt = conf.find_program('ocamlopt', var='OCAMLOPT')
+ occ = conf.find_program('ocamlc', var='OCAMLC')
+ if (not opt) or (not occ):
+		conf.fatal('The Objective Caml compiler was not found:\ninstall it or make it available in your PATH')
+
+ v = conf.env
+ v['OCAMLC'] = occ
+ v['OCAMLOPT'] = opt
+ v['OCAMLLEX'] = conf.find_program('ocamllex', var='OCAMLLEX')
+ v['OCAMLYACC'] = conf.find_program('ocamlyacc', var='OCAMLYACC')
+ v['OCAMLFLAGS'] = ''
+ v['OCAMLLIB'] = Utils.cmd_output(conf.env['OCAMLC']+' -where').strip()+os.sep
+ v['LIBPATH_OCAML'] = Utils.cmd_output(conf.env['OCAMLC']+' -where').strip()+os.sep
+ v['CPPPATH_OCAML'] = Utils.cmd_output(conf.env['OCAMLC']+' -where').strip()+os.sep
+ v['LIB_OCAML'] = 'camlrun'
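+
+# Example (editor's sketch, not from upstream; names and the 'type' value are
+# illustrative - see native_lst/bytecode_lst above for the accepted types):
+#
+#   def build(bld):
+#       bld.new_task_gen(features='ocaml', source='main.ml', target='calc',
+#                        type='all', includes='.')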
diff --git a/third_party/waf/wafadmin/Tools/osx.py b/third_party/waf/wafadmin/Tools/osx.py
new file mode 100644
index 0000000..88ca0d9
--- /dev/null
+++ b/third_party/waf/wafadmin/Tools/osx.py
@@ -0,0 +1,187 @@
+#!/usr/bin/env python
+# encoding: utf-8
+# Thomas Nagy 2008
+
+"""MacOSX related tools
+
+To compile an executable into a Mac application bundle (a .app), set its 'mac_app' attribute
+ obj.mac_app = True
+
+To make a bundled shared library (a .bundle), set the 'mac_bundle' attribute:
+ obj.mac_bundle = True
+"""
+
+import os, shutil, sys, platform
+import TaskGen, Task, Build, Options, Utils
+from TaskGen import taskgen, feature, after, before
+from Logs import error, debug
+
+# plist template
+app_info = '''
+<?xml version="1.0" encoding="UTF-8"?>
+<!DOCTYPE plist SYSTEM "file://localhost/System/Library/DTDs/PropertyList.dtd">
+<plist version="0.9">
+<dict>
+ <key>CFBundlePackageType</key>
+ <string>APPL</string>
+ <key>CFBundleGetInfoString</key>
+ <string>Created by Waf</string>
+ <key>CFBundleSignature</key>
+ <string>????</string>
+ <key>NOTE</key>
+ <string>THIS IS A GENERATED FILE, DO NOT MODIFY</string>
+ <key>CFBundleExecutable</key>
+ <string>%s</string>
+</dict>
+</plist>
+'''
+
+# see WAF issue 285
+# and also http://trac.macports.org/ticket/17059
+@feature('cc', 'cxx')
+@before('apply_lib_vars')
+def set_macosx_deployment_target(self):
+ if self.env['MACOSX_DEPLOYMENT_TARGET']:
+ os.environ['MACOSX_DEPLOYMENT_TARGET'] = self.env['MACOSX_DEPLOYMENT_TARGET']
+ elif 'MACOSX_DEPLOYMENT_TARGET' not in os.environ:
+ if sys.platform == 'darwin':
+ os.environ['MACOSX_DEPLOYMENT_TARGET'] = '.'.join(platform.mac_ver()[0].split('.')[:2])
+
+@feature('cc', 'cxx')
+@after('apply_lib_vars')
+def apply_framework(self):
+ for x in self.to_list(self.env['FRAMEWORKPATH']):
+ frameworkpath_st = '-F%s'
+ self.env.append_unique('CXXFLAGS', frameworkpath_st % x)
+ self.env.append_unique('CCFLAGS', frameworkpath_st % x)
+ self.env.append_unique('LINKFLAGS', frameworkpath_st % x)
+
+ for x in self.to_list(self.env['FRAMEWORK']):
+ self.env.append_value('LINKFLAGS', ['-framework', x])
+
+@taskgen
+def create_bundle_dirs(self, name, out):
+ bld = self.bld
+ dir = out.parent.get_dir(name)
+
+ if not dir:
+ dir = out.__class__(name, out.parent, 1)
+ bld.rescan(dir)
+ contents = out.__class__('Contents', dir, 1)
+ bld.rescan(contents)
+ macos = out.__class__('MacOS', contents, 1)
+ bld.rescan(macos)
+ return dir
+
+def bundle_name_for_output(out):
+ name = out.name
+ k = name.rfind('.')
+ if k >= 0:
+ name = name[:k] + '.app'
+ else:
+ name = name + '.app'
+ return name
+
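+# e.g. (editor's illustration) bundle_name_for_output maps a link output named
+# 'demo' or 'demo.bin' to the bundle name 'demo.app'
+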
+@taskgen
+@after('apply_link')
+@feature('cprogram')
+def create_task_macapp(self):
+ """Use env['MACAPP'] to force *all* executables to be transformed into Mac applications
+ or use obj.mac_app = True to build specific targets as Mac apps"""
+ if self.env['MACAPP'] or getattr(self, 'mac_app', False):
+ apptask = self.create_task('macapp')
+ apptask.set_inputs(self.link_task.outputs)
+
+ out = self.link_task.outputs[0]
+
+ name = bundle_name_for_output(out)
+ dir = self.create_bundle_dirs(name, out)
+
+ n1 = dir.find_or_declare(['Contents', 'MacOS', out.name])
+
+ apptask.set_outputs([n1])
+ apptask.chmod = 0755
+ apptask.install_path = os.path.join(self.install_path, name, 'Contents', 'MacOS')
+ self.apptask = apptask
+
+@after('apply_link')
+@feature('cprogram')
+def create_task_macplist(self):
+ """Use env['MACAPP'] to force *all* executables to be transformed into Mac applications
+ or use obj.mac_app = True to build specific targets as Mac apps"""
+ if self.env['MACAPP'] or getattr(self, 'mac_app', False):
+ # check if the user specified a plist before using our template
+ if not getattr(self, 'mac_plist', False):
+ self.mac_plist = app_info
+
+ plisttask = self.create_task('macplist')
+ plisttask.set_inputs(self.link_task.outputs)
+
+ out = self.link_task.outputs[0]
+ self.mac_plist = self.mac_plist % (out.name)
+
+ name = bundle_name_for_output(out)
+ dir = self.create_bundle_dirs(name, out)
+
+ n1 = dir.find_or_declare(['Contents', 'Info.plist'])
+
+ plisttask.set_outputs([n1])
+ plisttask.mac_plist = self.mac_plist
+ plisttask.install_path = os.path.join(self.install_path, name, 'Contents')
+ self.plisttask = plisttask
+
+@after('apply_link')
+@feature('cshlib')
+def apply_link_osx(self):
+ name = self.link_task.outputs[0].name
+ if not self.install_path:
+ return
+ if getattr(self, 'vnum', None):
+ name = name.replace('.dylib', '.%s.dylib' % self.vnum)
+
+ path = os.path.join(Utils.subst_vars(self.install_path, self.env), name)
+ if '-dynamiclib' in self.env['LINKFLAGS']:
+ self.env.append_value('LINKFLAGS', '-install_name')
+ self.env.append_value('LINKFLAGS', path)
+
+@before('apply_link', 'apply_lib_vars')
+@feature('cc', 'cxx')
+def apply_bundle(self):
+ """use env['MACBUNDLE'] to force all shlibs into mac bundles
+ or use obj.mac_bundle = True for specific targets only"""
+ if not ('cshlib' in self.features or 'shlib' in self.features): return
+ if self.env['MACBUNDLE'] or getattr(self, 'mac_bundle', False):
+ self.env['shlib_PATTERN'] = self.env['macbundle_PATTERN']
+ uselib = self.uselib = self.to_list(self.uselib)
+ if not 'MACBUNDLE' in uselib: uselib.append('MACBUNDLE')
+
+@after('apply_link')
+@feature('cshlib')
+def apply_bundle_remove_dynamiclib(self):
+ if self.env['MACBUNDLE'] or getattr(self, 'mac_bundle', False):
+ if not getattr(self, 'vnum', None):
+ try:
+ self.env['LINKFLAGS'].remove('-dynamiclib')
+ self.env['LINKFLAGS'].remove('-single_module')
+ except ValueError:
+ pass
+
+# TODO REMOVE IN 1.6 (global variable)
+app_dirs = ['Contents', 'Contents/MacOS', 'Contents/Resources']
+
+def app_build(task):
+ env = task.env
+ shutil.copy2(task.inputs[0].srcpath(env), task.outputs[0].abspath(env))
+
+ return 0
+
+def plist_build(task):
+ env = task.env
+ f = open(task.outputs[0].abspath(env), "w")
+ f.write(task.mac_plist)
+ f.close()
+
+ return 0
+
+Task.task_type_from_func('macapp', vars=[], func=app_build, after="cxx_link cc_link static_link")
+Task.task_type_from_func('macplist', vars=[], func=plist_build, after="cxx_link cc_link static_link")
diff --git a/third_party/waf/wafadmin/Tools/perl.py b/third_party/waf/wafadmin/Tools/perl.py
new file mode 100644
index 0000000..85105ea
--- /dev/null
+++ b/third_party/waf/wafadmin/Tools/perl.py
@@ -0,0 +1,108 @@
+#!/usr/bin/env python
+# encoding: utf-8
+# andersg at 0x63.nu 2007
+
+import os
+import Task, Options, Utils
+from Configure import conf
+from TaskGen import extension, taskgen, feature, before
+
+xsubpp_str = '${PERL} ${XSUBPP} -noprototypes -typemap ${EXTUTILS_TYPEMAP} ${SRC} > ${TGT}'
+EXT_XS = ['.xs']
+
+@before('apply_incpaths', 'apply_type_vars', 'apply_lib_vars')
+@feature('perlext')
+def init_perlext(self):
+ self.uselib = self.to_list(getattr(self, 'uselib', ''))
+ if not 'PERL' in self.uselib: self.uselib.append('PERL')
+ if not 'PERLEXT' in self.uselib: self.uselib.append('PERLEXT')
+ self.env['shlib_PATTERN'] = self.env['perlext_PATTERN']
+
+@extension(EXT_XS)
+def xsubpp_file(self, node):
+ outnode = node.change_ext('.c')
+ self.create_task('xsubpp', node, outnode)
+ self.allnodes.append(outnode)
+
+Task.simple_task_type('xsubpp', xsubpp_str, color='BLUE', before='cc cxx', shell=False)
+
+@conf
+def check_perl_version(conf, minver=None):
+ """
+ Checks if perl is installed.
+
+	If installed, the variable PERL will be set in the environment.
+
+	The perl binary can be overridden with the --with-perl-binary configuration option.
+
+ """
+
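+	# example (editor's sketch): conf.check_perl_version(minver=(5, 8, 8))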
+ if getattr(Options.options, 'perlbinary', None):
+ conf.env.PERL = Options.options.perlbinary
+ else:
+ conf.find_program('perl', var='PERL', mandatory=True)
+
+ try:
+ version = Utils.cmd_output([conf.env.PERL, '-e', 'printf "%vd",$^V'])
+ except:
+ conf.fatal('could not determine the perl version')
+
+ conf.env.PERL_VERSION = version
+ cver = ''
+ if minver:
+ try:
+ ver = tuple(map(int, version.split('.')))
+ except:
+ conf.fatal('unsupported perl version %r' % version)
+ if ver < minver:
+ conf.fatal('perl is too old')
+
+ cver = '.'.join(map(str,minver))
+ conf.check_message('perl', cver, True, version)
+
+@conf
+def check_perl_module(conf, module):
+ """
+	Check if the specified perl module is installed.
+
+	A minimum version can be specified after the module name,
+	like this:
+
+ conf.check_perl_module("Some::Module 2.92")
+ """
+ cmd = [conf.env['PERL'], '-e', 'use %s' % module]
+ r = Utils.pproc.call(cmd, stdout=Utils.pproc.PIPE, stderr=Utils.pproc.PIPE) == 0
+ conf.check_message("perl module %s" % module, "", r)
+ return r
+
+@conf
+def check_perl_ext_devel(conf):
+ """
+ Check for configuration needed to build perl extensions.
+
+ Sets different xxx_PERLEXT variables in the environment.
+
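+# Example (editor's sketch, not part of upstream): typical use from a wscript
+# configure function; the library names are illustrative:
+#
+#   def configure(conf):
+#       conf.check_tool('msvc')
+#       conf.check_lib_msvc('gdi32')
+#       conf.check_libs_msvc('kernel32 user32', mandatory=False)
+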
+	Also sets ARCHDIR_PERL, which is useful as an installation path
+	and can be overridden with --with-perl-archdir.
+ """
+ if not conf.env.PERL:
+ conf.fatal('perl detection is required first')
+
+ def read_out(cmd):
+ return Utils.to_list(Utils.cmd_output([conf.env.PERL, '-MConfig', '-e', cmd]))
+
+ conf.env.LINKFLAGS_PERLEXT = read_out('print $Config{lddlflags}')
+ conf.env.CPPPATH_PERLEXT = read_out('print "$Config{archlib}/CORE"')
+ conf.env.CCFLAGS_PERLEXT = read_out('print "$Config{ccflags} $Config{cccdlflags}"')
+ conf.env.XSUBPP = read_out('print "$Config{privlib}/ExtUtils/xsubpp$Config{exe_ext}"')
+ conf.env.EXTUTILS_TYPEMAP = read_out('print "$Config{privlib}/ExtUtils/typemap"')
+ conf.env.perlext_PATTERN = '%s.' + read_out('print $Config{dlext}')[0]
+
+ if getattr(Options.options, 'perlarchdir', None):
+ conf.env.ARCHDIR_PERL = Options.options.perlarchdir
+ else:
+ conf.env.ARCHDIR_PERL = read_out('print $Config{sitearch}')[0]
+
+def set_options(opt):
+ opt.add_option("--with-perl-binary", type="string", dest="perlbinary", help = 'Specify alternate perl binary', default=None)
+ opt.add_option("--with-perl-archdir", type="string", dest="perlarchdir", help = 'Specify directory where to install arch specific files', default=None)
diff --git a/third_party/waf/wafadmin/Tools/preproc.py b/third_party/waf/wafadmin/Tools/preproc.py
new file mode 100644
index 0000000..71eb05a
--- /dev/null
+++ b/third_party/waf/wafadmin/Tools/preproc.py
@@ -0,0 +1,837 @@
+#!/usr/bin/env python
+# encoding: utf-8
+# Thomas Nagy, 2006-2009 (ita)
+
+"""
+C/C++ preprocessor for finding dependencies
+
+Reasons for using the Waf preprocessor by default:
+1. Some c/c++ extensions (Qt) require a custom preprocessor for obtaining the dependencies (.moc files)
+2. Not all compilers provide .d files for obtaining the dependencies (portability)
+3. A naive file scanner will not catch constructs such as "#include foo()"
+4. A naive file scanner will catch unnecessary dependencies (change an unused header -> recompile everything)
+
+Regarding the speed concerns:
+a. the preprocessing is performed only when files must be compiled
+b. the macros are evaluated only for #if/#elif/#include
+c. the time penalty is about 10%
+d. system headers are not scanned
+
+Now if you do not want the Waf preprocessor, the tool "gccdeps" uses the .d files produced
+during the compilation to track the dependencies (useful when used with the boost libraries).
+It only works with gcc though, and it cannot be used with Qt builds. A dumb
+file scanner will be added in the future, so that most behaviours will be covered.
+"""
+# TODO: more varargs, pragma once
+# TODO: dumb file scanner tracking all includes
+
+import re, sys, os, string
+import Logs, Build, Utils
+from Logs import debug, error
+import traceback
+
+class PreprocError(Utils.WafError):
+ pass
+
+POPFILE = '-'
+
+
+recursion_limit = 5000
+"do not loop too much on header inclusion"
+
+go_absolute = 0
+"set to 1 to track headers on files in /usr/include - else absolute paths are ignored"
+
+standard_includes = ['/usr/include']
+if sys.platform == "win32":
+ standard_includes = []
+
+use_trigraphs = 0
+'apply the trigraph rules first'
+
+strict_quotes = 0
+"Keep <> for system includes (do not search for those includes)"
+
+g_optrans = {
+'not':'!',
+'and':'&&',
+'bitand':'&',
+'and_eq':'&=',
+'or':'||',
+'bitor':'|',
+'or_eq':'|=',
+'xor':'^',
+'xor_eq':'^=',
+'compl':'~',
+}
+"these ops are for c++, to reset, set an empty dict"
+
+# ignore #warning and #error
+re_lines = re.compile(\
+ '^[ \t]*(#|%:)[ \t]*(ifdef|ifndef|if|else|elif|endif|include|import|define|undef|pragma)[ \t]*(.*)\r*$',
+ re.IGNORECASE | re.MULTILINE)
+
+re_mac = re.compile("^[a-zA-Z_]\w*")
+re_fun = re.compile('^[a-zA-Z_][a-zA-Z0-9_]*[(]')
+re_pragma_once = re.compile('^\s*once\s*', re.IGNORECASE)
+re_nl = re.compile('\\\\\r*\n', re.MULTILINE)
+re_cpp = re.compile(
+ r"""(/\*[^*]*\*+(?:[^/*][^*]*\*+)*/)|//[^\n]*|("(?:\\.|[^"\\])*"|'(?:\\.|[^'\\])*'|.[^/"'\\]*)""",
+ re.MULTILINE)
+trig_def = [('??'+a, b) for a, b in zip("=-/!'()<>", r'#~\|^[]{}')]
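+# e.g. the trigraph '??=' maps to '#', '??(' to '[' and '??>' to '}'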
+chr_esc = {'0':0, 'a':7, 'b':8, 't':9, 'n':10, 'f':11, 'v':12, 'r':13, '\\':92, "'":39}
+
+NUM = 'i'
+OP = 'O'
+IDENT = 'T'
+STR = 's'
+CHAR = 'c'
+
+tok_types = [NUM, STR, IDENT, OP]
+exp_types = [
+ r"""0[xX](?P<hex>[a-fA-F0-9]+)(?P<qual1>[uUlL]*)|L*?'(?P<char>(\\.|[^\\'])+)'|(?P<n1>\d+)[Ee](?P<exp0>[+-]*?\d+)(?P<float0>[fFlL]*)|(?P<n2>\d*\.\d+)([Ee](?P<exp1>[+-]*?\d+))?(?P<float1>[fFlL]*)|(?P<n4>\d+\.\d*)([Ee](?P<exp2>[+-]*?\d+))?(?P<float2>[fFlL]*)|(?P<oct>0*)(?P<n0>\d+)(?P<qual2>[uUlL]*)""",
+ r'L?"([^"\\]|\\.)*"',
+ r'[a-zA-Z_]\w*',
+ r'%:%:|<<=|>>=|\.\.\.|<<|<%|<:|<=|>>|>=|\+\+|\+=|--|->|-=|\*=|/=|%:|%=|%>|==|&&|&=|\|\||\|=|\^=|:>|!=|##|[\(\)\{\}\[\]<>\?\|\^\*\+&=:!#;,%/\-\?\~\.]',
+]
+re_clexer = re.compile('|'.join(["(?P<%s>%s)" % (name, part) for name, part in zip(tok_types, exp_types)]), re.M)
+
+accepted = 'a'
+ignored = 'i'
+undefined = 'u'
+skipped = 's'
+
+def repl(m):
+ if m.group(1):
+ return ' '
+ s = m.group(2)
+ if s is None:
+ return ''
+ return s
+
+def filter_comments(filename):
+ # return a list of tuples : keyword, line
+ code = Utils.readf(filename)
+ if use_trigraphs:
+ for (a, b) in trig_def: code = code.split(a).join(b)
+ code = re_nl.sub('', code)
+ code = re_cpp.sub(repl, code)
+ return [(m.group(2), m.group(3)) for m in re.finditer(re_lines, code)]
+
+prec = {}
+# op -> number, needed for such expressions: #if 1 && 2 != 0
+ops = ['* / %', '+ -', '<< >>', '< <= >= >', '== !=', '& | ^', '&& ||', ',']
+for x in range(len(ops)):
+ syms = ops[x]
+ for u in syms.split():
+ prec[u] = x
+
+def reduce_nums(val_1, val_2, val_op):
+ """apply arithmetic rules and try to return an integer result"""
+ #print val_1, val_2, val_op
+
+ # now perform the operation, make certain a and b are numeric
+ try: a = 0 + val_1
+ except TypeError: a = int(val_1)
+ try: b = 0 + val_2
+ except TypeError: b = int(val_2)
+
+ d = val_op
+ if d == '%': c = a%b
+ elif d=='+': c = a+b
+ elif d=='-': c = a-b
+ elif d=='*': c = a*b
+ elif d=='/': c = a/b
+ elif d=='^': c = a^b
+ elif d=='|': c = a|b
+ elif d=='||': c = int(a or b)
+ elif d=='&': c = a&b
+ elif d=='&&': c = int(a and b)
+ elif d=='==': c = int(a == b)
+ elif d=='!=': c = int(a != b)
+ elif d=='<=': c = int(a <= b)
+ elif d=='<': c = int(a < b)
+ elif d=='>': c = int(a > b)
+ elif d=='>=': c = int(a >= b)
+ elif d=='^': c = int(a^b)
+ elif d=='<<': c = a<<b
+ elif d=='>>': c = a>>b
+ else: c = 0
+ return c
+
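+# e.g. (editor's illustration) reduce_nums(1, 2, '+') == 3 and
+# reduce_nums('7', '2', '<<') == 28 (string operands are coerced with int())
+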
+def get_num(lst):
+ if not lst: raise PreprocError("empty list for get_num")
+ (p, v) = lst[0]
+ if p == OP:
+ if v == '(':
+ count_par = 1
+ i = 1
+ while i < len(lst):
+ (p, v) = lst[i]
+
+ if p == OP:
+ if v == ')':
+ count_par -= 1
+ if count_par == 0:
+ break
+ elif v == '(':
+ count_par += 1
+ i += 1
+ else:
+ raise PreprocError("rparen expected %r" % lst)
+
+ (num, _) = get_term(lst[1:i])
+ return (num, lst[i+1:])
+
+ elif v == '+':
+ return get_num(lst[1:])
+ elif v == '-':
+ num, lst = get_num(lst[1:])
+ return (reduce_nums('-1', num, '*'), lst)
+ elif v == '!':
+ num, lst = get_num(lst[1:])
+ return (int(not int(num)), lst)
+		elif v == '~':
+			# bitwise negation: consume the operand first, as in the '!' case above
+			num, lst = get_num(lst[1:])
+			return (~ int(num), lst)
+ else:
+ raise PreprocError("invalid op token %r for get_num" % lst)
+ elif p == NUM:
+ return v, lst[1:]
+ elif p == IDENT:
+ # all macros should have been replaced, remaining identifiers eval to 0
+ return 0, lst[1:]
+ else:
+ raise PreprocError("invalid token %r for get_num" % lst)
+
+def get_term(lst):
+ if not lst: raise PreprocError("empty list for get_term")
+ num, lst = get_num(lst)
+ if not lst:
+ return (num, [])
+ (p, v) = lst[0]
+ if p == OP:
+ if v == '&&' and not num:
+ return (num, [])
+ elif v == '||' and num:
+ return (num, [])
+ elif v == ',':
+ # skip
+ return get_term(lst[1:])
+ elif v == '?':
+ count_par = 0
+ i = 1
+ while i < len(lst):
+ (p, v) = lst[i]
+
+ if p == OP:
+ if v == ')':
+ count_par -= 1
+ elif v == '(':
+ count_par += 1
+ elif v == ':':
+ if count_par == 0:
+ break
+ i += 1
+ else:
+ raise PreprocError("rparen expected %r" % lst)
+
+ if int(num):
+ return get_term(lst[1:i])
+ else:
+ return get_term(lst[i+1:])
+
+ else:
+ num2, lst = get_num(lst[1:])
+
+ if not lst:
+ # no more tokens to process
+ num2 = reduce_nums(num, num2, v)
+ return get_term([(NUM, num2)] + lst)
+
+ # operator precedence
+ p2, v2 = lst[0]
+ if p2 != OP:
+ raise PreprocError("op expected %r" % lst)
+
+ if prec[v2] >= prec[v]:
+ num2 = reduce_nums(num, num2, v)
+ return get_term([(NUM, num2)] + lst)
+ else:
+ num3, lst = get_num(lst[1:])
+ num3 = reduce_nums(num2, num3, v2)
+ return get_term([(NUM, num), (p, v), (NUM, num3)] + lst)
+
+
+ raise PreprocError("cannot reduce %r" % lst)
+
+def reduce_eval(lst):
+ """take a list of tokens and output true or false (#if/#elif conditions)"""
+ num, lst = get_term(lst)
+ return (NUM, num)
+
+def stringize(lst):
+	"""convert a list of tokens to a string"""
+ lst = [str(v2) for (p2, v2) in lst]
+ return "".join(lst)
+
+def paste_tokens(t1, t2):
+ """
+ here is what we can paste:
+ a ## b -> ab
+ > ## = -> >=
+ a ## 2 -> a2
+ """
+ p1 = None
+ if t1[0] == OP and t2[0] == OP:
+ p1 = OP
+ elif t1[0] == IDENT and (t2[0] == IDENT or t2[0] == NUM):
+ p1 = IDENT
+ elif t1[0] == NUM and t2[0] == NUM:
+ p1 = NUM
+ if not p1:
+ raise PreprocError('tokens do not make a valid paste %r and %r' % (t1, t2))
+ return (p1, t1[1] + t2[1])
+
+def reduce_tokens(lst, defs, ban=[]):
+ """replace the tokens in lst, using the macros provided in defs, and a list of macros that cannot be re-applied"""
+ i = 0
+
+ while i < len(lst):
+ (p, v) = lst[i]
+
+ if p == IDENT and v == "defined":
+ del lst[i]
+ if i < len(lst):
+ (p2, v2) = lst[i]
+ if p2 == IDENT:
+ if v2 in defs:
+ lst[i] = (NUM, 1)
+ else:
+ lst[i] = (NUM, 0)
+ elif p2 == OP and v2 == '(':
+ del lst[i]
+ (p2, v2) = lst[i]
+ del lst[i] # remove the ident, and change the ) for the value
+ if v2 in defs:
+ lst[i] = (NUM, 1)
+ else:
+ lst[i] = (NUM, 0)
+ else:
+ raise PreprocError("invalid define expression %r" % lst)
+
+ elif p == IDENT and v in defs:
+
+ if isinstance(defs[v], str):
+ a, b = extract_macro(defs[v])
+ defs[v] = b
+ macro_def = defs[v]
+ to_add = macro_def[1]
+
+ if isinstance(macro_def[0], list):
+ # macro without arguments
+ del lst[i]
+ for x in xrange(len(to_add)):
+ lst.insert(i, to_add[x])
+ i += 1
+ else:
+ # collect the arguments for the funcall
+
+ args = []
+ del lst[i]
+
+ if i >= len(lst):
+ raise PreprocError("expected '(' after %r (got nothing)" % v)
+
+ (p2, v2) = lst[i]
+ if p2 != OP or v2 != '(':
+ raise PreprocError("expected '(' after %r" % v)
+
+ del lst[i]
+
+ one_param = []
+ count_paren = 0
+ while i < len(lst):
+ p2, v2 = lst[i]
+
+ del lst[i]
+ if p2 == OP and count_paren == 0:
+ if v2 == '(':
+ one_param.append((p2, v2))
+ count_paren += 1
+ elif v2 == ')':
+ if one_param: args.append(one_param)
+ break
+ elif v2 == ',':
+ if not one_param: raise PreprocError("empty param in funcall %s" % p)
+ args.append(one_param)
+ one_param = []
+ else:
+ one_param.append((p2, v2))
+ else:
+ one_param.append((p2, v2))
+ if v2 == '(': count_paren += 1
+ elif v2 == ')': count_paren -= 1
+ else:
+ raise PreprocError('malformed macro')
+
+ # substitute the arguments within the define expression
+ accu = []
+ arg_table = macro_def[0]
+ j = 0
+ while j < len(to_add):
+ (p2, v2) = to_add[j]
+
+ if p2 == OP and v2 == '#':
+ # stringize is for arguments only
+ if j+1 < len(to_add) and to_add[j+1][0] == IDENT and to_add[j+1][1] in arg_table:
+ toks = args[arg_table[to_add[j+1][1]]]
+ accu.append((STR, stringize(toks)))
+ j += 1
+ else:
+ accu.append((p2, v2))
+ elif p2 == OP and v2 == '##':
+					# token pasting; how could anyone invent such a complicated system?
+ if accu and j+1 < len(to_add):
+ # we have at least two tokens
+
+ t1 = accu[-1]
+
+ if to_add[j+1][0] == IDENT and to_add[j+1][1] in arg_table:
+ toks = args[arg_table[to_add[j+1][1]]]
+
+ if toks:
+ accu[-1] = paste_tokens(t1, toks[0]) #(IDENT, accu[-1][1] + toks[0][1])
+ accu.extend(toks[1:])
+ else:
+ # error, case "a##"
+ accu.append((p2, v2))
+ accu.extend(toks)
+ elif to_add[j+1][0] == IDENT and to_add[j+1][1] == '__VA_ARGS__':
+ # TODO not sure
+ # first collect the tokens
+ va_toks = []
+ st = len(macro_def[0])
+ pt = len(args)
+ for x in args[pt-st+1:]:
+ va_toks.extend(x)
+ va_toks.append((OP, ','))
+ if va_toks: va_toks.pop() # extra comma
+ if len(accu)>1:
+ (p3, v3) = accu[-1]
+ (p4, v4) = accu[-2]
+ if v3 == '##':
+ # remove the token paste
+ accu.pop()
+ if v4 == ',' and pt < st:
+ # remove the comma
+ accu.pop()
+ accu += va_toks
+ else:
+ accu[-1] = paste_tokens(t1, to_add[j+1])
+
+ j += 1
+ else:
+ # invalid paste, case "##a" or "b##"
+ accu.append((p2, v2))
+
+ elif p2 == IDENT and v2 in arg_table:
+ toks = args[arg_table[v2]]
+ reduce_tokens(toks, defs, ban+[v])
+ accu.extend(toks)
+ else:
+ accu.append((p2, v2))
+
+ j += 1
+
+
+ reduce_tokens(accu, defs, ban+[v])
+
+ for x in xrange(len(accu)-1, -1, -1):
+ lst.insert(i, accu[x])
+
+ i += 1
+
+
+def eval_macro(lst, adefs):
+ """reduce the tokens from the list lst, and try to return a 0/1 result"""
+ reduce_tokens(lst, adefs, [])
+ if not lst: raise PreprocError("missing tokens to evaluate")
+ (p, v) = reduce_eval(lst)
+ return int(v) != 0
+
+def extract_macro(txt):
+ """process a macro definition from "#define f(x, y) x * y" into a function or a simple macro without arguments"""
+ t = tokenize(txt)
+ if re_fun.search(txt):
+ p, name = t[0]
+
+ p, v = t[1]
+ if p != OP: raise PreprocError("expected open parenthesis")
+
+ i = 1
+ pindex = 0
+ params = {}
+ prev = '('
+
+ while 1:
+ i += 1
+ p, v = t[i]
+
+ if prev == '(':
+ if p == IDENT:
+ params[v] = pindex
+ pindex += 1
+ prev = p
+ elif p == OP and v == ')':
+ break
+ else:
+ raise PreprocError("unexpected token (3)")
+ elif prev == IDENT:
+ if p == OP and v == ',':
+ prev = v
+ elif p == OP and v == ')':
+ break
+ else:
+ raise PreprocError("comma or ... expected")
+ elif prev == ',':
+ if p == IDENT:
+ params[v] = pindex
+ pindex += 1
+ prev = p
+ elif p == OP and v == '...':
+ raise PreprocError("not implemented (1)")
+ else:
+ raise PreprocError("comma or ... expected (2)")
+ elif prev == '...':
+ raise PreprocError("not implemented (2)")
+ else:
+ raise PreprocError("unexpected else")
+
+ #~ print (name, [params, t[i+1:]])
+ return (name, [params, t[i+1:]])
+ else:
+ (p, v) = t[0]
+ return (v, [[], t[1:]])
+
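+# e.g. (editor's illustration) extract_macro('PI 3') returns
+# ('PI', [[], [('i', '3')]]) - a parameterless macro whose body is one NUM token
+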
+re_include = re.compile('^\s*(<(?P<a>.*)>|"(?P<b>.*)")')
+def extract_include(txt, defs):
+ """process a line in the form "#include foo" to return a string representing the file"""
+ m = re_include.search(txt)
+ if m:
+ if m.group('a'): return '<', m.group('a')
+ if m.group('b'): return '"', m.group('b')
+
+ # perform preprocessing and look at the result, it must match an include
+ toks = tokenize(txt)
+ reduce_tokens(toks, defs, ['waf_include'])
+
+ if not toks:
+ raise PreprocError("could not parse include %s" % txt)
+
+ if len(toks) == 1:
+ if toks[0][0] == STR:
+ return '"', toks[0][1]
+ else:
+ if toks[0][1] == '<' and toks[-1][1] == '>':
+			# return the same (kind, name) pair shape as the other branches
+			return '<', stringize(toks).lstrip('<').rstrip('>')
+
+ raise PreprocError("could not parse include %s." % txt)
+
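+# e.g. (editor's illustration) extract_include('<stdio.h>', {}) returns
+# ('<', 'stdio.h') and extract_include('"config.h"', {}) returns ('"', 'config.h')
+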
+def parse_char(txt):
+ if not txt: raise PreprocError("attempted to parse a null char")
+ if txt[0] != '\\':
+ return ord(txt)
+ c = txt[1]
+ if c == 'x':
+		return int(txt[2:], 16)
+ elif c.isdigit():
+ if c == '0' and len(txt)==2: return 0
+ for i in 3, 2, 1:
+ if len(txt) > i and txt[1:1+i].isdigit():
+ return (1+i, int(txt[1:1+i], 8))
+ else:
+ try: return chr_esc[c]
+ except KeyError: raise PreprocError("could not parse char literal '%s'" % txt)
+
+@Utils.run_once
+def tokenize_private(s):
+ ret = []
+ for match in re_clexer.finditer(s):
+ m = match.group
+ for name in tok_types:
+ v = m(name)
+ if v:
+ if name == IDENT:
+ try: v = g_optrans[v]; name = OP
+ except KeyError:
+ # c++ specific
+ if v.lower() == "true":
+ v = 1
+ name = NUM
+ elif v.lower() == "false":
+ v = 0
+ name = NUM
+ elif name == NUM:
+ if m('oct'): v = int(v, 8)
+ elif m('hex'): v = int(m('hex'), 16)
+ elif m('n0'): v = m('n0')
+ else:
+ v = m('char')
+ if v: v = parse_char(v)
+ else: v = m('n2') or m('n4')
+ elif name == OP:
+ if v == '%:': v = '#'
+ elif v == '%:%:': v = '##'
+ elif name == STR:
+ # remove the quotes around the string
+ v = v[1:-1]
+ ret.append((name, v))
+ break
+ return ret
+
+def tokenize(s):
+ """convert a string into a list of tokens (shlex.split does not apply to c/c++/d)"""
+ return tokenize_private(s)[:]
+
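+# e.g. (editor's illustration) tokenize('A >= 2') yields
+# [('T', 'A'), ('O', '>='), ('i', '2')] - an IDENT, an OP and a NUM token
+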
+@Utils.run_once
+def define_name(line):
+ return re_mac.match(line).group(0)
+
+class c_parser(object):
+ def __init__(self, nodepaths=None, defines=None):
+ #self.lines = txt.split('\n')
+ self.lines = []
+
+ if defines is None:
+ self.defs = {}
+ else:
+ self.defs = dict(defines) # make a copy
+ self.state = []
+
+ self.env = None # needed for the variant when searching for files
+
+ self.count_files = 0
+ self.currentnode_stack = []
+
+ self.nodepaths = nodepaths or []
+
+ self.nodes = []
+ self.names = []
+
+ # file added
+ self.curfile = ''
+ self.ban_includes = set([])
+
+ def cached_find_resource(self, node, filename):
+ try:
+ nd = node.bld.cache_nd
+ except:
+ nd = node.bld.cache_nd = {}
+
+ tup = (node.id, filename)
+ try:
+ return nd[tup]
+ except KeyError:
+ ret = node.find_resource(filename)
+ nd[tup] = ret
+ return ret
+
+ def tryfind(self, filename):
+ self.curfile = filename
+
+ # for msvc it should be a for loop on the whole stack
+ found = self.cached_find_resource(self.currentnode_stack[-1], filename)
+
+ for n in self.nodepaths:
+ if found:
+ break
+ found = self.cached_find_resource(n, filename)
+
+ if found:
+ self.nodes.append(found)
+ if filename[-4:] != '.moc':
+ self.addlines(found)
+ else:
+ if not filename in self.names:
+ self.names.append(filename)
+ return found
+
+ def addlines(self, node):
+
+ self.currentnode_stack.append(node.parent)
+ filepath = node.abspath(self.env)
+
+ self.count_files += 1
+ if self.count_files > recursion_limit: raise PreprocError("recursion limit exceeded")
+ pc = self.parse_cache
+ debug('preproc: reading file %r', filepath)
+ try:
+ lns = pc[filepath]
+ except KeyError:
+ pass
+ else:
+ self.lines.extend(lns)
+ return
+
+ try:
+ lines = filter_comments(filepath)
+ lines.append((POPFILE, ''))
+ lines.reverse()
+ pc[filepath] = lines # cache the lines filtered
+ self.lines.extend(lines)
+ except IOError:
+ raise PreprocError("could not read the file %s" % filepath)
+ except Exception:
+ if Logs.verbose > 0:
+ error("parsing %s failed" % filepath)
+ traceback.print_exc()
+
+ def start(self, node, env):
+ debug('preproc: scanning %s (in %s)', node.name, node.parent.name)
+
+ self.env = env
+ variant = node.variant(env)
+ bld = node.__class__.bld
+ try:
+ self.parse_cache = bld.parse_cache
+ except AttributeError:
+ bld.parse_cache = {}
+ self.parse_cache = bld.parse_cache
+
+ self.addlines(node)
+ if env['DEFLINES']:
+ lst = [('define', x) for x in env['DEFLINES']]
+ lst.reverse()
+ self.lines.extend(lst)
+
+ while self.lines:
+ (kind, line) = self.lines.pop()
+ if kind == POPFILE:
+ self.currentnode_stack.pop()
+ continue
+ try:
+ self.process_line(kind, line)
+ except Exception, e:
+ if Logs.verbose:
+ debug('preproc: line parsing failed (%s): %s %s', e, line, Utils.ex_stack())
+
+ def process_line(self, token, line):
+ """
+		WARNING: a new state must be pushed for each if* directive so that the matching endif can pop it
+ """
+ ve = Logs.verbose
+ if ve: debug('preproc: line is %s - %s state is %s', token, line, self.state)
+ state = self.state
+
+ # make certain we define the state if we are about to enter in an if block
+ if token in ['ifdef', 'ifndef', 'if']:
+ state.append(undefined)
+ elif token == 'endif':
+ state.pop()
+
+ # skip lines when in a dead 'if' branch, wait for the endif
+ if not token in ['else', 'elif', 'endif']:
+ if skipped in self.state or ignored in self.state:
+ return
+
+ if token == 'if':
+ ret = eval_macro(tokenize(line), self.defs)
+ if ret: state[-1] = accepted
+ else: state[-1] = ignored
+ elif token == 'ifdef':
+ m = re_mac.match(line)
+ if m and m.group(0) in self.defs: state[-1] = accepted
+ else: state[-1] = ignored
+ elif token == 'ifndef':
+ m = re_mac.match(line)
+ if m and m.group(0) in self.defs: state[-1] = ignored
+ else: state[-1] = accepted
+ elif token == 'include' or token == 'import':
+ (kind, inc) = extract_include(line, self.defs)
+ if inc in self.ban_includes: return
+ if token == 'import': self.ban_includes.add(inc)
+ if ve: debug('preproc: include found %s (%s) ', inc, kind)
+ if kind == '"' or not strict_quotes:
+ self.tryfind(inc)
+ elif token == 'elif':
+ if state[-1] == accepted:
+ state[-1] = skipped
+ elif state[-1] == ignored:
+ if eval_macro(tokenize(line), self.defs):
+ state[-1] = accepted
+ elif token == 'else':
+ if state[-1] == accepted: state[-1] = skipped
+ elif state[-1] == ignored: state[-1] = accepted
+ elif token == 'define':
+ try:
+ self.defs[define_name(line)] = line
+ except:
+ raise PreprocError("invalid define line %s" % line)
+ elif token == 'undef':
+ m = re_mac.match(line)
+ if m and m.group(0) in self.defs:
+ self.defs.__delitem__(m.group(0))
+ #print "undef %s" % name
+ elif token == 'pragma':
+ if re_pragma_once.match(line.lower()):
+ self.ban_includes.add(self.curfile)
+
+def get_deps(node, env, nodepaths=[]):
+ """
+	Get the dependencies using a c/c++ preprocessor; this is required for finding dependencies of the kind
+ #include some_macro()
+ """
+
+ gruik = c_parser(nodepaths)
+ gruik.start(node, env)
+ return (gruik.nodes, gruik.names)
+
+#################### dumb dependency scanner
+
+re_inc = re.compile(\
+ '^[ \t]*(#|%:)[ \t]*(include)[ \t]*(.*)\r*$',
+ re.IGNORECASE | re.MULTILINE)
+
+def lines_includes(filename):
+ code = Utils.readf(filename)
+ if use_trigraphs:
+ for (a, b) in trig_def: code = code.split(a).join(b)
+ code = re_nl.sub('', code)
+ code = re_cpp.sub(repl, code)
+ return [(m.group(2), m.group(3)) for m in re.finditer(re_inc, code)]
+
+def get_deps_simple(node, env, nodepaths=[], defines={}):
+ """
+ Get the dependencies by just looking recursively at the #include statements
+ """
+
+ nodes = []
+ names = []
+
+ def find_deps(node):
+ lst = lines_includes(node.abspath(env))
+
+ for (_, line) in lst:
+ (t, filename) = extract_include(line, defines)
+ if filename in names:
+ continue
+
+ if filename.endswith('.moc'):
+ names.append(filename)
+
+ found = None
+ for n in nodepaths:
+ if found:
+ break
+ found = n.find_resource(filename)
+
+ if not found:
+ if not filename in names:
+ names.append(filename)
+ elif not found in nodes:
+ nodes.append(found)
+ find_deps(node)
+
+ find_deps(node)
+ return (nodes, names)
diff --git a/third_party/waf/wafadmin/Tools/python.py b/third_party/waf/wafadmin/Tools/python.py
new file mode 100644
index 0000000..cd96b65
--- /dev/null
+++ b/third_party/waf/wafadmin/Tools/python.py
@@ -0,0 +1,432 @@
+#!/usr/bin/env python
+# encoding: utf-8
+# Thomas Nagy, 2007 (ita)
+# Gustavo Carneiro (gjc), 2007
+
+"Python support"
+
+import os, sys
+import TaskGen, Utils, Options
+from Logs import debug, warn, info
+from TaskGen import extension, before, after, feature
+from Configure import conf
+from config_c import parse_flags
+
+EXT_PY = ['.py']
+FRAG_2 = '''
+#include "Python.h"
+#ifdef __cplusplus
+extern "C" {
+#endif
+ void Py_Initialize(void);
+ void Py_Finalize(void);
+#ifdef __cplusplus
+}
+#endif
+int main()
+{
+ Py_Initialize();
+ Py_Finalize();
+ return 0;
+}
+'''
+
+@feature('pyext')
+@before('apply_incpaths', 'apply_lib_vars', 'apply_type_vars', 'apply_bundle')
+@after('vars_target_cshlib')
+def init_pyext(self):
+ self.default_install_path = '${PYTHONARCHDIR}'
+ self.uselib = self.to_list(getattr(self, 'uselib', ''))
+ if not 'PYEXT' in self.uselib:
+ self.uselib.append('PYEXT')
+ self.env['MACBUNDLE'] = True
+
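+# Example (editor's sketch, names illustrative): building a python extension
+# from a wscript with this tool:
+#
+#   def build(bld):
+#       bld.new_task_gen(features='cc cshlib pyext', source='spam.c',
+#                        target='spam')
+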
+@before('apply_link', 'apply_lib_vars', 'apply_type_vars')
+@after('apply_bundle')
+@feature('pyext')
+def pyext_shlib_ext(self):
+ # override shlib_PATTERN set by the osx module
+ self.env['shlib_PATTERN'] = self.env['pyext_PATTERN']
+
+@before('apply_incpaths', 'apply_lib_vars', 'apply_type_vars')
+@feature('pyembed')
+def init_pyembed(self):
+ self.uselib = self.to_list(getattr(self, 'uselib', ''))
+ if not 'PYEMBED' in self.uselib:
+ self.uselib.append('PYEMBED')
+
+@extension(EXT_PY)
+def process_py(self, node):
+ if not (self.bld.is_install and self.install_path):
+ return
+ def inst_py(ctx):
+ install_pyfile(self, node)
+ self.bld.add_post_fun(inst_py)
+
+def install_pyfile(self, node):
+ path = self.bld.get_install_path(self.install_path + os.sep + node.name, self.env)
+
+ self.bld.install_files(self.install_path, [node], self.env, self.chmod, postpone=False)
+ if self.bld.is_install < 0:
+ info("* removing byte compiled python files")
+ for x in 'co':
+ try:
+ os.remove(path + x)
+ except OSError:
+ pass
+
+ if self.bld.is_install > 0:
+ if self.env['PYC'] or self.env['PYO']:
+ info("* byte compiling %r" % path)
+
+ if self.env['PYC']:
+ program = ("""
+import sys, py_compile
+for pyfile in sys.argv[1:]:
+ py_compile.compile(pyfile, pyfile + 'c')
+""")
+ argv = [self.env['PYTHON'], '-c', program, path]
+ ret = Utils.pproc.Popen(argv).wait()
+ if ret:
+ raise Utils.WafError('bytecode compilation failed %r' % path)
+
+ if self.env['PYO']:
+ program = ("""
+import sys, py_compile
+for pyfile in sys.argv[1:]:
+ py_compile.compile(pyfile, pyfile + 'o')
+""")
+ argv = [self.env['PYTHON'], self.env['PYFLAGS_OPT'], '-c', program, path]
+ ret = Utils.pproc.Popen(argv).wait()
+ if ret:
+ raise Utils.WafError('bytecode compilation failed %r' % path)
+
+# COMPAT
+class py_taskgen(TaskGen.task_gen):
+ def __init__(self, *k, **kw):
+ TaskGen.task_gen.__init__(self, *k, **kw)
+
+@before('apply_core')
+@after('vars_target_cprogram', 'vars_target_cshlib')
+@feature('py')
+def init_py(self):
+ self.default_install_path = '${PYTHONDIR}'
+
+def _get_python_variables(python_exe, variables, imports=['import sys']):
+ """Run a python interpreter and print some variables"""
+ program = list(imports)
+ program.append('')
+ for v in variables:
+ program.append("print(repr(%s))" % v)
+ os_env = dict(os.environ)
+ try:
+ del os_env['MACOSX_DEPLOYMENT_TARGET'] # see comments in the OSX tool
+ except KeyError:
+ pass
+ proc = Utils.pproc.Popen([python_exe, "-c", '\n'.join(program)], stdout=Utils.pproc.PIPE, env=os_env)
+ output = proc.communicate()[0].split("\n") # do not touch, python3
+ if proc.returncode:
+ if Options.options.verbose:
+ warn("Python program to extract python configuration variables failed:\n%s"
+ % '\n'.join(["line %03i: %s" % (lineno+1, line) for lineno, line in enumerate(program)]))
+ raise RuntimeError
+ return_values = []
+ for s in output:
+ s = s.strip()
+ if not s:
+ continue
+ if s == 'None':
+ return_values.append(None)
+ elif (s[0] == "'" and s[-1] == "'") or (s[0] == '"' and s[-1] == '"'):
+ return_values.append(eval(s))
+ elif s[0].isdigit():
+ return_values.append(int(s))
+ else: break
+ return return_values
+
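
_get_python_variables() is the key trick in this tool: run the target
interpreter, print repr() of each configuration value, and parse the output
back. A standalone sketch of the same idea (standard library only, assuming
Python 2.7+ for check_output):

    import subprocess, sys

    program = '\n'.join([
        'import sys',
        'print(repr(sys.prefix))',
        'print(repr(sys.version_info[:2]))',
    ])
    # query the (possibly different) interpreter and read the values back
    out = subprocess.check_output([sys.executable, '-c', program])
    print(out.decode().splitlines())
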
+@conf
+def check_python_headers(conf, mandatory=True):
+ """Check for headers and libraries necessary to extend or embed python.
+
+ On success the environment variables xxx_PYEXT and xxx_PYEMBED are added for uselib
+
+ PYEXT: for compiling python extensions
+ PYEMBED: for embedding a python interpreter"""
+
+ if not conf.env['CC_NAME'] and not conf.env['CXX_NAME']:
+ conf.fatal('load a compiler first (gcc, g++, ..)')
+
+ if not conf.env['PYTHON_VERSION']:
+ conf.check_python_version()
+
+ env = conf.env
+ python = env['PYTHON']
+ if not python:
+ conf.fatal('could not find the python executable')
+
+ ## On Mac OSX we need to use mac bundles for python plugins
+ if Options.platform == 'darwin':
+ conf.check_tool('osx')
+
+ try:
+ # Get some python configuration variables using distutils
+ v = 'prefix SO SYSLIBS LDFLAGS SHLIBS LIBDIR LIBPL INCLUDEPY Py_ENABLE_SHARED MACOSX_DEPLOYMENT_TARGET LDVERSION'.split()
+ (python_prefix, python_SO, python_SYSLIBS, python_LDFLAGS, python_SHLIBS,
+ python_LIBDIR, python_LIBPL, INCLUDEPY, Py_ENABLE_SHARED,
+ python_MACOSX_DEPLOYMENT_TARGET, python_LDVERSION) = \
+ _get_python_variables(python, ["get_config_var('%s') or ''" % x for x in v],
+ ['from distutils.sysconfig import get_config_var'])
+ except RuntimeError:
+ conf.fatal("Python development headers not found (-v for details).")
+
+ conf.log.write("""Configuration returned from %r:
+python_prefix = %r
+python_SO = %r
+python_SYSLIBS = %r
+python_LDFLAGS = %r
+python_SHLIBS = %r
+python_LIBDIR = %r
+python_LIBPL = %r
+INCLUDEPY = %r
+Py_ENABLE_SHARED = %r
+MACOSX_DEPLOYMENT_TARGET = %r
+LDVERSION = %r
+""" % (python, python_prefix, python_SO, python_SYSLIBS, python_LDFLAGS, python_SHLIBS,
+ python_LIBDIR, python_LIBPL, INCLUDEPY, Py_ENABLE_SHARED, python_MACOSX_DEPLOYMENT_TARGET,
+ python_LDVERSION))
+
+ # Allow some python overrides from env vars for cross-compiling
+ os_env = dict(os.environ)
+
+ override_python_LDFLAGS = os_env.get('python_LDFLAGS', None)
+ if override_python_LDFLAGS is not None:
+ conf.log.write("python_LDFLAGS override from environment = %r\n" % (override_python_LDFLAGS))
+ python_LDFLAGS = override_python_LDFLAGS
+
+ override_python_LIBDIR = os_env.get('python_LIBDIR', None)
+ if override_python_LIBDIR is not None:
+ conf.log.write("python_LIBDIR override from environment = %r\n" % (override_python_LIBDIR))
+ python_LIBDIR = override_python_LIBDIR
+
+ if python_MACOSX_DEPLOYMENT_TARGET:
+ conf.env['MACOSX_DEPLOYMENT_TARGET'] = python_MACOSX_DEPLOYMENT_TARGET
+ conf.environ['MACOSX_DEPLOYMENT_TARGET'] = python_MACOSX_DEPLOYMENT_TARGET
+
+ env['pyext_PATTERN'] = '%s'+python_SO
+
+ # Check for python libraries for embedding
+ if python_SYSLIBS is not None:
+ for lib in python_SYSLIBS.split():
+ if lib.startswith('-l'):
+ lib = lib[2:] # strip '-l'
+ env.append_value('LIB_PYEMBED', lib)
+
+ if python_SHLIBS is not None:
+ for lib in python_SHLIBS.split():
+ if lib.startswith('-l'):
+ env.append_value('LIB_PYEMBED', lib[2:]) # strip '-l'
+ else:
+ env.append_value('LINKFLAGS_PYEMBED', lib)
+
+ if Options.platform != 'darwin' and python_LDFLAGS:
+ parse_flags(python_LDFLAGS, 'PYEMBED', env)
+
+ result = False
+ if not python_LDVERSION:
+ python_LDVERSION = env['PYTHON_VERSION']
+ name = 'python' + python_LDVERSION
+
+ if python_LIBDIR is not None:
+ path = [python_LIBDIR]
+ conf.log.write("\n\n# Trying LIBDIR: %r\n" % path)
+ result = conf.check(lib=name, uselib='PYEMBED', libpath=path)
+
+ if not result and python_LIBPL is not None:
+ conf.log.write("\n\n# try again with -L$python_LIBPL (some systems don't install the python library in $prefix/lib)\n")
+ path = [python_LIBPL]
+ result = conf.check(lib=name, uselib='PYEMBED', libpath=path)
+
+ if not result:
+ conf.log.write("\n\n# try again with -L$prefix/libs, and pythonXY name rather than pythonX.Y (win32)\n")
+ path = [os.path.join(python_prefix, "libs")]
+ name = 'python' + python_LDVERSION.replace('.', '')
+ result = conf.check(lib=name, uselib='PYEMBED', libpath=path)
+
+ if result:
+ env['LIBPATH_PYEMBED'] = path
+ env.append_value('LIB_PYEMBED', name)
+ else:
+ conf.log.write("\n\n### LIB NOT FOUND\n")
+
+ # under certain conditions, python extensions must link to
+ # python libraries, not just python embedding programs.
+ if (sys.platform == 'win32' or sys.platform.startswith('os2')
+ or sys.platform == 'darwin' or Py_ENABLE_SHARED):
+ env['LIBPATH_PYEXT'] = env['LIBPATH_PYEMBED']
+ env['LIB_PYEXT'] = env['LIB_PYEMBED']
+
+ # We check that pythonX.Y-config exists, and if it exists we
+ # use it to get only the includes, else fall back to distutils.
+ python_config = conf.find_program(
+ 'python%s-config' % ('.'.join(env['PYTHON_VERSION'].split('.')[:2])),
+ var='PYTHON_CONFIG')
+ if not python_config:
+ python_config = conf.find_program(
+ 'python-config-%s' % ('.'.join(env['PYTHON_VERSION'].split('.')[:2])),
+ var='PYTHON_CONFIG')
+
+ includes = []
+ if python_config:
+ for incstr in Utils.cmd_output("%s --includes" % (python_config,)).strip().split():
+ # strip the -I or /I
+ if (incstr.startswith('-I')
+ or incstr.startswith('/I')):
+ incstr = incstr[2:]
+ # append include path, unless already given
+ if incstr not in includes:
+ includes.append(incstr)
+ conf.log.write("Include path for Python extensions "
+ "(found via python-config --includes): %r\n" % (includes,))
+ env['CPPPATH_PYEXT'] = includes
+ env['CPPPATH_PYEMBED'] = includes
+ else:
+ conf.log.write("Include path for Python extensions "
+ "(found via distutils module): %r\n" % (INCLUDEPY,))
+ env['CPPPATH_PYEXT'] = [INCLUDEPY]
+ env['CPPPATH_PYEMBED'] = [INCLUDEPY]
+
+ # Code using the Python API needs to be compiled with -fno-strict-aliasing
+ if env['CC_NAME'] == 'gcc':
+ env.append_value('CCFLAGS_PYEMBED', '-fno-strict-aliasing')
+ env.append_value('CCFLAGS_PYEXT', '-fno-strict-aliasing')
+ if env['CXX_NAME'] == 'gcc':
+ env.append_value('CXXFLAGS_PYEMBED', '-fno-strict-aliasing')
+ env.append_value('CXXFLAGS_PYEXT', '-fno-strict-aliasing')
+
+ # See if it compiles
+ conf.check(define_name='HAVE_PYTHON_H',
+ uselib='PYEMBED', fragment=FRAG_2,
+ errmsg='Could not find the python development headers', mandatory=mandatory)
+
+@conf
+def check_python_version(conf, minver=None):
+ """
+ Check if the python interpreter is found matching a given minimum version.
+ minver should be a tuple, eg. to check for python >= 2.4.2 pass (2,4,2) as minver.
+
+ If successful, PYTHON_VERSION is defined as 'MAJOR.MINOR'
+ (eg. '2.4') of the actual python version found, and PYTHONDIR is
+ defined, pointing to the site-packages directory appropriate for
+ this python version, where modules/packages/extensions should be
+ installed.
+ """
+ assert minver is None or isinstance(minver, tuple)
+ python = conf.env['PYTHON']
+ if not python:
+ conf.fatal('could not find the python executable')
+
+ # Get python version string
+ cmd = [python, "-c", "import sys\nfor x in sys.version_info: print(str(x))"]
+ debug('python: Running python command %r' % cmd)
+ proc = Utils.pproc.Popen(cmd, stdout=Utils.pproc.PIPE, shell=False)
+ lines = proc.communicate()[0].split()
+ assert len(lines) == 5, "found %i lines, expected 5: %r" % (len(lines), lines)
+ pyver_tuple = (int(lines[0]), int(lines[1]), int(lines[2]), lines[3], int(lines[4]))
+
+ # compare python version with the minimum required
+ result = (minver is None) or (pyver_tuple >= minver)
+
+ if result:
+ # define useful environment variables
+ pyver = '.'.join([str(x) for x in pyver_tuple[:2]])
+ conf.env['PYTHON_VERSION'] = pyver
+
+ if 'PYTHONDIR' in conf.environ:
+ pydir = conf.environ['PYTHONDIR']
+ else:
+ if sys.platform == 'win32':
+ (python_LIBDEST, pydir) = \
+ _get_python_variables(python,
+ ["get_config_var('LIBDEST') or ''",
+ "get_python_lib(standard_lib=0, prefix=%r) or ''" % conf.env['PREFIX']],
+ ['from distutils.sysconfig import get_config_var, get_python_lib'])
+ else:
+ python_LIBDEST = None
+ (pydir,) = \
+ _get_python_variables(python,
+ ["get_python_lib(standard_lib=0, prefix=%r) or ''" % conf.env['PREFIX']],
+ ['from distutils.sysconfig import get_config_var, get_python_lib'])
+ if python_LIBDEST is None:
+ if conf.env['LIBDIR']:
+ python_LIBDEST = os.path.join(conf.env['LIBDIR'], "python" + pyver)
+ else:
+ python_LIBDEST = os.path.join(conf.env['PREFIX'], "lib", "python" + pyver)
+
+ if 'PYTHONARCHDIR' in conf.environ:
+ pyarchdir = conf.environ['PYTHONARCHDIR']
+ else:
+ (pyarchdir,) = _get_python_variables(python,
+ ["get_python_lib(plat_specific=1, standard_lib=0, prefix=%r) or ''" % conf.env['PREFIX']],
+ ['from distutils.sysconfig import get_config_var, get_python_lib'])
+ if not pyarchdir:
+ pyarchdir = pydir
+
+ if hasattr(conf, 'define'): # conf.define is added by the C tool, so may not exist
+ conf.define('PYTHONDIR', pydir)
+ conf.define('PYTHONARCHDIR', pyarchdir)
+
+ conf.env['PYTHONDIR'] = pydir
+
+ # Feedback
+ pyver_full = '.'.join(map(str, pyver_tuple[:3]))
+ if minver is None:
+ conf.check_message_custom('Python version', '', pyver_full)
+ else:
+ minver_str = '.'.join(map(str, minver))
+ conf.check_message('Python version', ">= %s" % minver_str, result, option=pyver_full)
+
+ if not result:
+ conf.fatal('The python version is too old (%r)' % pyver_full)
+
+@conf
+def check_python_module(conf, module_name):
+ """
+ Check if the selected python interpreter can import the given python module.
+ """
+ result = not Utils.pproc.Popen([conf.env['PYTHON'], "-c", "import %s" % module_name],
+ stderr=Utils.pproc.PIPE, stdout=Utils.pproc.PIPE).wait()
+ conf.check_message('Python module', module_name, result)
+ if not result:
+ conf.fatal('Could not find the python module %r' % module_name)
+
+def detect(conf):
+
+ if not conf.env.PYTHON:
+ conf.env.PYTHON = sys.executable
+
+ python = conf.find_program('python', var='PYTHON')
+ if not python:
+ conf.fatal('Could not find the path of the python executable')
+
+ if conf.env.PYTHON != sys.executable:
+ warn("python executable '%s' different from sys.executable '%s'" % (conf.env.PYTHON, sys.executable))
+
+ v = conf.env
+ v['PYCMD'] = '"import sys, py_compile;py_compile.compile(sys.argv[1], sys.argv[2])"'
+ v['PYFLAGS'] = ''
+ v['PYFLAGS_OPT'] = '-O'
+
+ v['PYC'] = getattr(Options.options, 'pyc', 1)
+ v['PYO'] = getattr(Options.options, 'pyo', 1)
+
+def set_options(opt):
+ opt.add_option('--nopyc',
+ action='store_false',
+ default=1,
+ help = 'Do not install bytecode compiled .pyc files (configuration) [Default:install]',
+ dest = 'pyc')
+ opt.add_option('--nopyo',
+ action='store_false',
+ default=1,
+ help='Do not install optimised compiled .pyo files (configuration) [Default:install]',
+ dest='pyo')
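
check_python_module() above reduces to an exit-status probe of the selected
interpreter; a minimal sketch (assuming Python 3.3+ for subprocess.DEVNULL):

    import subprocess, sys

    ok = subprocess.call([sys.executable, '-c', 'import json'],
                         stdout=subprocess.DEVNULL,
                         stderr=subprocess.DEVNULL) == 0
    print('json importable:', ok)
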
diff --git a/third_party/waf/wafadmin/Tools/qt4.py b/third_party/waf/wafadmin/Tools/qt4.py
new file mode 100644
index 0000000..7d2cad7
--- /dev/null
+++ b/third_party/waf/wafadmin/Tools/qt4.py
@@ -0,0 +1,504 @@
+#!/usr/bin/env python
+# encoding: utf-8
+# Thomas Nagy, 2006 (ita)
+
+"""
+Qt4 support
+
+If QT4_ROOT is given (absolute path), the configuration will look in it first
+
+This module also demonstrates how to add tasks dynamically (when the build has started)
+"""
+
+try:
+ from xml.sax import make_parser
+ from xml.sax.handler import ContentHandler
+except ImportError:
+ has_xml = False
+ ContentHandler = object
+else:
+ has_xml = True
+
+import os, sys
+import ccroot, cxx
+import TaskGen, Task, Utils, Runner, Options, Node, Configure
+from TaskGen import taskgen, feature, after, extension
+from Logs import error
+from Constants import *
+
+MOC_H = ['.h', '.hpp', '.hxx', '.hh']
+EXT_RCC = ['.qrc']
+EXT_UI = ['.ui']
+EXT_QT4 = ['.cpp', '.cc', '.cxx', '.C']
+
+class qxx_task(Task.Task):
+ "A cpp task that may create a moc task dynamically"
+
+ before = ['cxx_link', 'static_link']
+
+ def __init__(self, *k, **kw):
+ Task.Task.__init__(self, *k, **kw)
+ self.moc_done = 0
+
+ def scan(self):
+ (nodes, names) = ccroot.scan(self)
+ # for some reason (variants) the moc node may end up in the list of node deps
+ for x in nodes:
+ if x.name.endswith('.moc'):
+ nodes.remove(x)
+ names.append(x.relpath_gen(self.inputs[0].parent))
+ return (nodes, names)
+
+ def runnable_status(self):
+ if self.moc_done:
+ # if there is a moc task, delay the computation of the file signature
+ for t in self.run_after:
+ if not t.hasrun:
+ return ASK_LATER
+ # the moc file enters in the dependency calculation
+ # so we need to recompute the signature when the moc file is present
+ self.signature()
+ return Task.Task.runnable_status(self)
+ else:
+ # yes, really, there are people who generate cxx files
+ for t in self.run_after:
+ if not t.hasrun:
+ return ASK_LATER
+ self.add_moc_tasks()
+ return ASK_LATER
+
+ def add_moc_tasks(self):
+
+ node = self.inputs[0]
+ tree = node.__class__.bld
+
+ try:
+ # compute the signature once to know if there is a moc file to create
+ self.signature()
+ except KeyError:
+ # the moc file may be referenced somewhere else
+ pass
+ else:
+ # remove the signature, it must be recomputed with the moc task
+ delattr(self, 'cache_sig')
+
+ moctasks=[]
+ mocfiles=[]
+ variant = node.variant(self.env)
+ try:
+ tmp_lst = tree.raw_deps[self.unique_id()]
+ tree.raw_deps[self.unique_id()] = []
+ except KeyError:
+ tmp_lst = []
+ for d in tmp_lst:
+ if not d.endswith('.moc'): continue
+ # paranoid check
+ if d in mocfiles:
+ error("paranoia owns")
+ continue
+
+ # process that base.moc only once
+ mocfiles.append(d)
+
+ # find the extension (performed only when the .cpp has changed)
+ base2 = d[:-4]
+ for path in [node.parent] + self.generator.env['INC_PATHS']:
+ tree.rescan(path)
+ vals = Utils.to_list(getattr(Options.options, 'qt_header_ext', '')) or MOC_H
+ for ex in vals:
+ h_node = path.find_resource(base2 + ex)
+ if h_node:
+ break
+ else:
+ continue
+ break
+ else:
+ raise Utils.WafError("no header found for %s which is a moc file" % str(d))
+
+ m_node = h_node.change_ext('.moc')
+ tree.node_deps[(self.inputs[0].parent.id, self.env.variant(), m_node.name)] = h_node
+
+ # create the task
+ task = Task.TaskBase.classes['moc'](self.env, normal=0)
+ task.set_inputs(h_node)
+ task.set_outputs(m_node)
+
+ generator = tree.generator
+ generator.outstanding.insert(0, task)
+ generator.total += 1
+
+ moctasks.append(task)
+
+ # remove raw deps except the moc files to save space (optimization)
+ tmp_lst = tree.raw_deps[self.unique_id()] = mocfiles
+
+ # look at the file inputs, it is set right above
+ lst = tree.node_deps.get(self.unique_id(), ())
+ for d in lst:
+ name = d.name
+ if name.endswith('.moc'):
+ task = Task.TaskBase.classes['moc'](self.env, normal=0)
+ task.set_inputs(tree.node_deps[(self.inputs[0].parent.id, self.env.variant(), name)]) # 1st element in a tuple
+ task.set_outputs(d)
+
+ generator = tree.generator
+ generator.outstanding.insert(0, task)
+ generator.total += 1
+
+ moctasks.append(task)
+
+ # simple scheduler dependency: run the moc task before others
+ self.run_after = moctasks
+ self.moc_done = 1
+
+ run = Task.TaskBase.classes['cxx'].__dict__['run']
+
+def translation_update(task):
+ outs = [a.abspath(task.env) for a in task.outputs]
+ outs = " ".join(outs)
+ lupdate = task.env['QT_LUPDATE']
+
+ for x in task.inputs:
+ file = x.abspath(task.env)
+ cmd = "%s %s -ts %s" % (lupdate, file, outs)
+ Utils.pprint('BLUE', cmd)
+ task.generator.bld.exec_command(cmd)
+
+class XMLHandler(ContentHandler):
+ def __init__(self):
+ self.buf = []
+ self.files = []
+ def startElement(self, name, attrs):
+ if name == 'file':
+ self.buf = []
+ def endElement(self, name):
+ if name == 'file':
+ self.files.append(''.join(self.buf))
+ def characters(self, cars):
+ self.buf.append(cars)
+
+def scan(self):
+ "add the dependency on the files referenced in the qrc"
+ node = self.inputs[0]
+ parser = make_parser()
+ curHandler = XMLHandler()
+ parser.setContentHandler(curHandler)
+ fi = open(self.inputs[0].abspath(self.env))
+ parser.parse(fi)
+ fi.close()
+
+ nodes = []
+ names = []
+ root = self.inputs[0].parent
+ for x in curHandler.files:
+ nd = root.find_resource(x)
+ if nd: nodes.append(nd)
+ else: names.append(x)
+
+ return (nodes, names)
+
+@extension(EXT_RCC)
+def create_rcc_task(self, node):
+ "hook for rcc files"
+ rcnode = node.change_ext('_rc.cpp')
+ rcctask = self.create_task('rcc', node, rcnode)
+ cpptask = self.create_task('cxx', rcnode, rcnode.change_ext('.o'))
+ self.compiled_tasks.append(cpptask)
+ return cpptask
+
+@extension(EXT_UI)
+def create_uic_task(self, node):
+ "hook for uic tasks"
+ uictask = self.create_task('ui4', node)
+ uictask.outputs = [self.path.find_or_declare(self.env['ui_PATTERN'] % node.name[:-3])]
+ return uictask
+
+class qt4_taskgen(cxx.cxx_taskgen):
+ def __init__(self, *k, **kw):
+ cxx.cxx_taskgen.__init__(self, *k, **kw)
+ self.features.append('qt4')
+
+@extension('.ts')
+def add_lang(self, node):
+ """add all the .ts file into self.lang"""
+ self.lang = self.to_list(getattr(self, 'lang', [])) + [node]
+
+@feature('qt4')
+@after('apply_link')
+def apply_qt4(self):
+ if getattr(self, 'lang', None):
+ update = getattr(self, 'update', None)
+ lst=[]
+ trans=[]
+ for l in self.to_list(self.lang):
+
+ if not isinstance(l, Node.Node):
+ l = self.path.find_resource(l+'.ts')
+
+ t = self.create_task('ts2qm', l, l.change_ext('.qm'))
+ lst.append(t.outputs[0])
+
+ if update:
+ trans.append(t.inputs[0])
+
+ trans_qt4 = getattr(Options.options, 'trans_qt4', False)
+ if update and trans_qt4:
+ # we need the cpp files given, except the rcc task we create after
+ # FIXME may be broken
+ u = Task.TaskCmd(translation_update, self.env, 2)
+ u.inputs = [a.inputs[0] for a in self.compiled_tasks]
+ u.outputs = trans
+
+ if getattr(self, 'langname', None):
+ t = Task.TaskBase.classes['qm2rcc'](self.env)
+ t.set_inputs(lst)
+ t.set_outputs(self.path.find_or_declare(self.langname+'.qrc'))
+ t.path = self.path
+ k = create_rcc_task(self, t.outputs[0])
+ self.link_task.inputs.append(k.outputs[0])
+
+ self.env.append_value('MOC_FLAGS', self.env._CXXDEFFLAGS)
+ self.env.append_value('MOC_FLAGS', self.env._CXXINCFLAGS)
+
+@extension(EXT_QT4)
+def cxx_hook(self, node):
+ # create the compilation task: cpp or cc
+ try: obj_ext = self.obj_ext
+ except AttributeError: obj_ext = '_%d.o' % self.idx
+
+ task = self.create_task('qxx', node, node.change_ext(obj_ext))
+ self.compiled_tasks.append(task)
+ return task
+
+def process_qm2rcc(task):
+ outfile = task.outputs[0].abspath(task.env)
+ f = open(outfile, 'w')
+ f.write('<!DOCTYPE RCC><RCC version="1.0">\n<qresource>\n')
+ for k in task.inputs:
+ f.write(' <file>')
+ #f.write(k.name)
+ f.write(k.path_to_parent(task.path))
+ f.write('</file>\n')
+ f.write('</qresource>\n</RCC>')
+ f.close()
+
+b = Task.simple_task_type
+b('moc', '${QT_MOC} ${MOC_FLAGS} ${SRC} ${MOC_ST} ${TGT}', color='BLUE', vars=['QT_MOC', 'MOC_FLAGS'], shell=False)
+cls = b('rcc', '${QT_RCC} -name ${SRC[0].name} ${SRC[0].abspath(env)} ${RCC_ST} -o ${TGT}', color='BLUE', before='cxx moc qxx_task', after="qm2rcc", shell=False)
+cls.scan = scan
+b('ui4', '${QT_UIC} ${SRC} -o ${TGT}', color='BLUE', before='cxx moc qxx_task', shell=False)
+b('ts2qm', '${QT_LRELEASE} ${QT_LRELEASE_FLAGS} ${SRC} -qm ${TGT}', color='BLUE', before='qm2rcc', shell=False)
+
+Task.task_type_from_func('qm2rcc', vars=[], func=process_qm2rcc, color='BLUE', before='rcc', after='ts2qm')
+
+def detect_qt4(conf):
+ env = conf.env
+ opt = Options.options
+
+ qtdir = getattr(opt, 'qtdir', '')
+ qtbin = getattr(opt, 'qtbin', '')
+ qtlibs = getattr(opt, 'qtlibs', '')
+ useframework = getattr(opt, 'use_qt4_osxframework', True)
+
+ paths = []
+
+ # the path to qmake has been given explicitly
+ if qtbin:
+ paths = [qtbin]
+
+ # the qt directory has been given - we deduce the qt binary path
+ if not qtdir:
+ qtdir = conf.environ.get('QT4_ROOT', '')
+ qtbin = os.path.join(qtdir, 'bin')
+ paths = [qtbin]
+
+ # no qtdir, look in the path and in /usr/local/Trolltech
+ if not qtdir:
+ paths = os.environ.get('PATH', '').split(os.pathsep)
+ paths.append('/usr/share/qt4/bin/')
+ try:
+ lst = os.listdir('/usr/local/Trolltech/')
+ except OSError:
+ pass
+ else:
+ if lst:
+ lst.sort()
+ lst.reverse()
+
+ # keep the highest version
+ qtdir = '/usr/local/Trolltech/%s/' % lst[0]
+ qtbin = os.path.join(qtdir, 'bin')
+ paths.append(qtbin)
+
+ # at the end, try to find qmake in the paths given
+ # keep the one with the highest version
+ cand = None
+ prev_ver = ['4', '0', '0']
+ for qmk in ['qmake-qt4', 'qmake4', 'qmake']:
+ qmake = conf.find_program(qmk, path_list=paths)
+ if qmake:
+ try:
+ version = Utils.cmd_output([qmake, '-query', 'QT_VERSION']).strip()
+ except ValueError:
+ pass
+ else:
+ if version:
+ new_ver = version.split('.')
+ if new_ver > prev_ver:
+ cand = qmake
+ prev_ver = new_ver
+ if cand:
+ qmake = cand
+ else:
+ conf.fatal('could not find qmake for qt4')
+
+ conf.env.QMAKE = qmake
+ qtincludes = Utils.cmd_output([qmake, '-query', 'QT_INSTALL_HEADERS']).strip()
+ qtdir = Utils.cmd_output([qmake, '-query', 'QT_INSTALL_PREFIX']).strip() + os.sep
+ qtbin = Utils.cmd_output([qmake, '-query', 'QT_INSTALL_BINS']).strip() + os.sep
+
+ if not qtlibs:
+ try:
+ qtlibs = Utils.cmd_output([qmake, '-query', 'QT_INSTALL_LIBS']).strip() + os.sep
+ except ValueError:
+ qtlibs = os.path.join(qtdir, 'lib')
+
+ def find_bin(lst, var):
+ for f in lst:
+ ret = conf.find_program(f, path_list=paths)
+ if ret:
+ env[var]=ret
+ break
+
+ vars = "QtCore QtGui QtUiTools QtNetwork QtOpenGL QtSql QtSvg QtTest QtXml QtWebKit Qt3Support".split()
+
+ find_bin(['uic-qt3', 'uic3'], 'QT_UIC3')
+ find_bin(['uic-qt4', 'uic'], 'QT_UIC')
+ if not env['QT_UIC']:
+ conf.fatal('cannot find the uic compiler for qt4')
+
+ try:
+ version = Utils.cmd_output(env['QT_UIC'] + " -version 2>&1").strip()
+ except ValueError:
+ conf.fatal('your uic compiler is for qt3, add uic for qt4 to your path')
+
+ version = version.replace('Qt User Interface Compiler ','')
+ version = version.replace('User Interface Compiler for Qt', '')
+ if version.find(" 3.") != -1:
+ conf.check_message('uic version', '(too old)', 0, option='(%s)'%version)
+ sys.exit(1)
+ conf.check_message('uic version', '', 1, option='(%s)'%version)
+
+ find_bin(['moc-qt4', 'moc'], 'QT_MOC')
+ find_bin(['rcc'], 'QT_RCC')
+ find_bin(['lrelease-qt4', 'lrelease'], 'QT_LRELEASE')
+ find_bin(['lupdate-qt4', 'lupdate'], 'QT_LUPDATE')
+
+ env['UIC3_ST']= '%s -o %s'
+ env['UIC_ST'] = '%s -o %s'
+ env['MOC_ST'] = '-o'
+ env['ui_PATTERN'] = 'ui_%s.h'
+ env['QT_LRELEASE_FLAGS'] = ['-silent']
+
+ vars_debug = [a+'_debug' for a in vars]
+
+ try:
+ conf.find_program('pkg-config', var='pkgconfig', path_list=paths, mandatory=True)
+
+ except Configure.ConfigurationError:
+
+ for lib in vars_debug+vars:
+ uselib = lib.upper()
+
+ d = (lib.find('_debug') > 0) and 'd' or ''
+
+ # original author seems to prefer static to shared libraries
+ for (pat, kind) in ((conf.env.staticlib_PATTERN, 'STATIC'), (conf.env.shlib_PATTERN, '')):
+
+ conf.check_message_1('Checking for %s %s' % (lib, kind))
+
+ for ext in ['', '4']:
+ path = os.path.join(qtlibs, pat % (lib + d + ext))
+ if os.path.exists(path):
+ env.append_unique(kind + 'LIB_' + uselib, lib + d + ext)
+ conf.check_message_2('ok ' + path, 'GREEN')
+ break
+ path = os.path.join(qtbin, pat % (lib + d + ext))
+ if os.path.exists(path):
+ env.append_unique(kind + 'LIB_' + uselib, lib + d + ext)
+ conf.check_message_2('ok ' + path, 'GREEN')
+ break
+ else:
+ conf.check_message_2('not found', 'YELLOW')
+ continue
+ break
+
+ env.append_unique('LIBPATH_' + uselib, qtlibs)
+ env.append_unique('CPPPATH_' + uselib, qtincludes)
+ env.append_unique('CPPPATH_' + uselib, qtincludes + os.sep + lib)
+ else:
+ for i in vars_debug+vars:
+ try:
+ conf.check_cfg(package=i, args='--cflags --libs --silence-errors', path=conf.env.pkgconfig)
+ except ValueError:
+ pass
+
+ # the libpaths are set nicely, unfortunately they make really long command-lines
+ # remove the qtcore ones from qtgui, etc
+ def process_lib(vars_, coreval):
+ for d in vars_:
+ var = d.upper()
+ if var == 'QTCORE': continue
+
+ value = env['LIBPATH_'+var]
+ if value:
+ core = env[coreval]
+ accu = []
+ for lib in value:
+ if lib in core: continue
+ accu.append(lib)
+ env['LIBPATH_'+var] = accu
+
+ process_lib(vars, 'LIBPATH_QTCORE')
+ process_lib(vars_debug, 'LIBPATH_QTCORE_DEBUG')
+
+ # rpath if wanted
+ want_rpath = getattr(Options.options, 'want_rpath', 1)
+ if want_rpath:
+ def process_rpath(vars_, coreval):
+ for d in vars_:
+ var = d.upper()
+ value = env['LIBPATH_'+var]
+ if value:
+ core = env[coreval]
+ accu = []
+ for lib in value:
+ if var != 'QTCORE':
+ if lib in core:
+ continue
+ accu.append('-Wl,--rpath='+lib)
+ env['RPATH_'+var] = accu
+ process_rpath(vars, 'LIBPATH_QTCORE')
+ process_rpath(vars_debug, 'LIBPATH_QTCORE_DEBUG')
+
+ env['QTLOCALE'] = str(env['PREFIX'])+'/share/locale'
+
+def detect(conf):
+ detect_qt4(conf)
+
+def set_options(opt):
+ opt.add_option('--want-rpath', type='int', default=1, dest='want_rpath', help='set rpath to 1 or 0 [Default 1]')
+
+ opt.add_option('--header-ext',
+ type='string',
+ default='',
+ help='header extension for moc files',
+ dest='qt_header_ext')
+
+ for i in 'qtdir qtbin qtlibs'.split():
+ opt.add_option('--'+i, type='string', default='', dest=i)
+
+ if sys.platform == "darwin":
+ opt.add_option('--no-qt4-framework', action="store_false", help='do not use the framework version of Qt4 in OS X', dest='use_qt4_osxframework',default=True)
+
+ opt.add_option('--translate', action="store_true", help="collect translation strings", dest="trans_qt4", default=False)
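
A note on the qmake selection in detect_qt4(): it compares lists of
version-string components, which misorders multi-digit components. A sketch of
the safer integer-tuple comparison (version numbers are hypothetical):

    def vtuple(s):
        return tuple(int(x) for x in s.split('.'))

    print(['4', '8', '7'] > ['4', '10', '0'])   # True  (string comparison)
    print(vtuple('4.8.7') > vtuple('4.10.0'))   # False (numeric comparison)
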
diff --git a/third_party/waf/wafadmin/Tools/ruby.py b/third_party/waf/wafadmin/Tools/ruby.py
new file mode 100644
index 0000000..afa8a59
--- /dev/null
+++ b/third_party/waf/wafadmin/Tools/ruby.py
@@ -0,0 +1,119 @@
+#!/usr/bin/env python
+# encoding: utf-8
+# daniel.svensson at purplescout.se 2008
+
+import os
+import Task, Options, Utils
+from TaskGen import before, feature, after
+from Configure import conf
+
+@feature('rubyext')
+@before('apply_incpaths', 'apply_type_vars', 'apply_lib_vars', 'apply_bundle')
+@after('default_cc', 'vars_target_cshlib')
+def init_rubyext(self):
+ self.default_install_path = '${ARCHDIR_RUBY}'
+ self.uselib = self.to_list(getattr(self, 'uselib', ''))
+ if not 'RUBY' in self.uselib:
+ self.uselib.append('RUBY')
+ if not 'RUBYEXT' in self.uselib:
+ self.uselib.append('RUBYEXT')
+
+@feature('rubyext')
+@before('apply_link')
+def apply_ruby_so_name(self):
+ self.env['shlib_PATTERN'] = self.env['rubyext_PATTERN']
+
+@conf
+def check_ruby_version(conf, minver=()):
+ """
+ Checks if ruby is installed.
+ If installed, the variable RUBY will be set in the environment.
+ The ruby binary can be overridden with the --with-ruby-binary configuration option.
+ """
+
+ if Options.options.rubybinary:
+ conf.env.RUBY = Options.options.rubybinary
+ else:
+ conf.find_program("ruby", var="RUBY", mandatory=True)
+
+ ruby = conf.env.RUBY
+
+ try:
+ version = Utils.cmd_output([ruby, '-e', 'puts defined?(VERSION) ? VERSION : RUBY_VERSION']).strip()
+ except Exception:
+ conf.fatal('could not determine ruby version')
+ conf.env.RUBY_VERSION = version
+
+ try:
+ ver = tuple(map(int, version.split(".")))
+ except Exception:
+ conf.fatal('unsupported ruby version %r' % version)
+
+ cver = ''
+ if minver:
+ if ver < minver:
+ conf.fatal('ruby is too old')
+ cver = ".".join([str(x) for x in minver])
+
+ conf.check_message('ruby', cver, True, version)
+
+@conf
+def check_ruby_ext_devel(conf):
+ if not conf.env.RUBY:
+ conf.fatal('ruby detection is required first')
+
+ if not conf.env.CC_NAME and not conf.env.CXX_NAME:
+ conf.fatal('load a c/c++ compiler first')
+
+ version = tuple(map(int, conf.env.RUBY_VERSION.split(".")))
+
+ def read_out(cmd):
+ return Utils.to_list(Utils.cmd_output([conf.env.RUBY, '-rrbconfig', '-e', cmd]))
+
+ def read_config(key):
+ return read_out('puts Config::CONFIG[%r]' % key)
+
+ ruby = conf.env['RUBY']
+ archdir = read_config('archdir')
+ cpppath = archdir
+ if version >= (1, 9, 0):
+ ruby_hdrdir = read_config('rubyhdrdir')
+ cpppath += ruby_hdrdir
+ cpppath += [os.path.join(ruby_hdrdir[0], read_config('arch')[0])]
+
+ conf.check(header_name='ruby.h', includes=cpppath, mandatory=True, errmsg='could not find ruby header file')
+
+ conf.env.LIBPATH_RUBYEXT = read_config('libdir')
+ conf.env.LIBPATH_RUBYEXT += archdir
+ conf.env.CPPPATH_RUBYEXT = cpppath
+ conf.env.CCFLAGS_RUBYEXT = read_config("CCDLFLAGS")
+ conf.env.rubyext_PATTERN = '%s.' + read_config('DLEXT')[0]
+
+ # ok this is really stupid, but the command and flags are combined.
+ # so we try to find the first argument...
+ flags = read_config('LDSHARED')
+ while flags and flags[0][0] != '-':
+ flags = flags[1:]
+
+ # we also want to strip out the deprecated ppc flags
+ if len(flags) > 1 and flags[1] == "ppc":
+ flags = flags[2:]
+
+ conf.env.LINKFLAGS_RUBYEXT = flags
+ conf.env.LINKFLAGS_RUBYEXT += read_config("LIBS")
+ conf.env.LINKFLAGS_RUBYEXT += read_config("LIBRUBYARG_SHARED")
+
+ if Options.options.rubyarchdir:
+ conf.env.ARCHDIR_RUBY = Options.options.rubyarchdir
+ else:
+ conf.env.ARCHDIR_RUBY = read_config('sitearchdir')[0]
+
+ if Options.options.rubylibdir:
+ conf.env.LIBDIR_RUBY = Options.options.rubylibdir
+ else:
+ conf.env.LIBDIR_RUBY = read_config('sitelibdir')[0]
+
+def set_options(opt):
+ opt.add_option('--with-ruby-archdir', type='string', dest='rubyarchdir', help='Specify directory where to install arch specific files')
+ opt.add_option('--with-ruby-libdir', type='string', dest='rubylibdir', help='Specify alternate ruby library path')
+ opt.add_option('--with-ruby-binary', type='string', dest='rubybinary', help='Specify alternate ruby binary')
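
The LDSHARED handling in check_ruby_ext_devel() strips the leading command
word(s) from a combined "command + flags" value; the core of that loop as a
standalone sketch, with a hypothetical value:

    flags = 'gcc -shared -fPIC'.split()
    # drop tokens until the first one that looks like a flag
    while flags and not flags[0].startswith('-'):
        flags = flags[1:]
    print(flags)   # ['-shared', '-fPIC']
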
diff --git a/third_party/waf/wafadmin/Tools/suncc.py b/third_party/waf/wafadmin/Tools/suncc.py
new file mode 100644
index 0000000..b1a2aad
--- /dev/null
+++ b/third_party/waf/wafadmin/Tools/suncc.py
@@ -0,0 +1,76 @@
+#!/usr/bin/env python
+# encoding: utf-8
+# Thomas Nagy, 2006 (ita)
+# Ralf Habacker, 2006 (rh)
+
+import os, optparse
+import Utils, Options, Configure
+import ccroot, ar
+from Configure import conftest
+
+@conftest
+def find_scc(conf):
+ v = conf.env
+ cc = None
+ if v['CC']: cc = v['CC']
+ elif 'CC' in conf.environ: cc = conf.environ['CC']
+ #if not cc: cc = conf.find_program('gcc', var='CC')
+ if not cc: cc = conf.find_program('cc', var='CC')
+ if not cc: conf.fatal('suncc was not found')
+ cc = conf.cmd_to_list(cc)
+
+ try:
+ if not Utils.cmd_output(cc + ['-flags']):
+ conf.fatal('suncc %r was not found' % cc)
+ except ValueError:
+ conf.fatal('suncc -flags could not be executed')
+
+ v['CC'] = cc
+ v['CC_NAME'] = 'sun'
+
+@conftest
+def scc_common_flags(conf):
+ v = conf.env
+
+ # CPPFLAGS CCDEFINES _CCINCFLAGS _CCDEFFLAGS
+
+ v['CC_SRC_F'] = ''
+ v['CC_TGT_F'] = ['-c', '-o', '']
+ v['CPPPATH_ST'] = '-I%s' # template for adding include paths
+
+ # linker
+ if not v['LINK_CC']: v['LINK_CC'] = v['CC']
+ v['CCLNK_SRC_F'] = ''
+ v['CCLNK_TGT_F'] = ['-o', ''] # solaris hack, separate the -o from the target
+
+ v['LIB_ST'] = '-l%s' # template for adding libs
+ v['LIBPATH_ST'] = '-L%s' # template for adding libpaths
+ v['STATICLIB_ST'] = '-l%s'
+ v['STATICLIBPATH_ST'] = '-L%s'
+ v['CCDEFINES_ST'] = '-D%s'
+
+ v['SONAME_ST'] = '-Wl,-h -Wl,%s'
+ v['SHLIB_MARKER'] = '-Bdynamic'
+ v['STATICLIB_MARKER'] = '-Bstatic'
+
+ # program
+ v['program_PATTERN'] = '%s'
+
+ # shared library
+ v['shlib_CCFLAGS'] = ['-Kpic', '-DPIC']
+ v['shlib_LINKFLAGS'] = ['-G']
+ v['shlib_PATTERN'] = 'lib%s.so'
+
+ # static lib
+ v['staticlib_LINKFLAGS'] = ['-Bstatic']
+ v['staticlib_PATTERN'] = 'lib%s.a'
+
+detect = '''
+find_scc
+find_cpp
+find_ar
+scc_common_flags
+cc_load_tools
+cc_add_flags
+link_add_flags
+'''
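
The *_ST entries above are printf-style templates that get expanded once per
library or path at link time; for example (values are hypothetical):

    v = {'LIB_ST': '-l%s', 'LIBPATH_ST': '-L%s', 'SONAME_ST': '-Wl,-h -Wl,%s'}
    print(v['LIB_ST'] % 'm', v['LIBPATH_ST'] % '/opt/SUNWspro/lib')
    print(v['SONAME_ST'] % 'libfoo.so.1')
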
diff --git a/third_party/waf/wafadmin/Tools/suncxx.py b/third_party/waf/wafadmin/Tools/suncxx.py
new file mode 100644
index 0000000..8754b6c
--- /dev/null
+++ b/third_party/waf/wafadmin/Tools/suncxx.py
@@ -0,0 +1,75 @@
+#!/usr/bin/env python
+# encoding: utf-8
+# Thomas Nagy, 2006 (ita)
+# Ralf Habacker, 2006 (rh)
+
+import os, optparse
+import Utils, Options, Configure
+import ccroot, ar
+from Configure import conftest
+
+@conftest
+def find_sxx(conf):
+ v = conf.env
+ cc = None
+ if v['CXX']: cc = v['CXX']
+ elif 'CXX' in conf.environ: cc = conf.environ['CXX']
+ if not cc: cc = conf.find_program('c++', var='CXX')
+ if not cc: conf.fatal('sunc++ was not found')
+ cc = conf.cmd_to_list(cc)
+
+ try:
+ if not Utils.cmd_output(cc + ['-flags']):
+ conf.fatal('sunc++ %r was not found' % cc)
+ except ValueError:
+ conf.fatal('sunc++ -flags could not be executed')
+
+ v['CXX'] = cc
+ v['CXX_NAME'] = 'sun'
+
+@conftest
+def sxx_common_flags(conf):
+ v = conf.env
+
+ # CPPFLAGS CXXDEFINES _CXXINCFLAGS _CXXDEFFLAGS
+
+ v['CXX_SRC_F'] = ''
+ v['CXX_TGT_F'] = ['-c', '-o', '']
+ v['CPPPATH_ST'] = '-I%s' # template for adding include paths
+
+ # linker
+ if not v['LINK_CXX']: v['LINK_CXX'] = v['CXX']
+ v['CXXLNK_SRC_F'] = ''
+ v['CXXLNK_TGT_F'] = ['-o', ''] # solaris hack, separate the -o from the target
+
+ v['LIB_ST'] = '-l%s' # template for adding libs
+ v['LIBPATH_ST'] = '-L%s' # template for adding libpaths
+ v['STATICLIB_ST'] = '-l%s'
+ v['STATICLIBPATH_ST'] = '-L%s'
+ v['CXXDEFINES_ST'] = '-D%s'
+
+ v['SONAME_ST'] = '-Wl,-h -Wl,%s'
+ v['SHLIB_MARKER'] = '-Bdynamic'
+ v['STATICLIB_MARKER'] = '-Bstatic'
+
+ # program
+ v['program_PATTERN'] = '%s'
+
+ # shared library
+ v['shlib_CXXFLAGS'] = ['-Kpic', '-DPIC']
+ v['shlib_LINKFLAGS'] = ['-G']
+ v['shlib_PATTERN'] = 'lib%s.so'
+
+ # static lib
+ v['staticlib_LINKFLAGS'] = ['-Bstatic']
+ v['staticlib_PATTERN'] = 'lib%s.a'
+
+detect = '''
+find_sxx
+find_cpp
+find_ar
+sxx_common_flags
+cxx_load_tools
+cxx_add_flags
+link_add_flags
+'''
diff --git a/third_party/waf/wafadmin/Tools/tex.py b/third_party/waf/wafadmin/Tools/tex.py
new file mode 100644
index 0000000..43aee1f
--- /dev/null
+++ b/third_party/waf/wafadmin/Tools/tex.py
@@ -0,0 +1,250 @@
+#!/usr/bin/env python
+# encoding: utf-8
+# Thomas Nagy, 2006 (ita)
+
+"TeX/LaTeX/PDFLaTeX support"
+
+import os, re
+import Utils, TaskGen, Task, Runner, Build
+from TaskGen import feature, before
+from Logs import error, warn, debug
+
+re_tex = re.compile(r'\\(?P<type>include|input|import|bringin|lstinputlisting){(?P<file>[^{}]*)}', re.M)
+def scan(self):
+ node = self.inputs[0]
+ env = self.env
+
+ nodes = []
+ names = []
+ if not node: return (nodes, names)
+
+ code = Utils.readf(node.abspath(env))
+
+ curdirnode = self.curdirnode
+ abs = curdirnode.abspath()
+ for match in re_tex.finditer(code):
+ path = match.group('file')
+ if path:
+ for k in ['', '.tex', '.ltx']:
+ # add another loop for the tex include paths?
+ debug('tex: trying %s%s' % (path, k))
+ try:
+ os.stat(abs+os.sep+path+k)
+ except OSError:
+ continue
+ found = path+k
+ node = curdirnode.find_resource(found)
+ if node:
+ nodes.append(node)
+ else:
+ debug('tex: could not find %s' % path)
+ names.append(path)
+
+ debug("tex: found the following : %s and names %s" % (nodes, names))
+ return (nodes, names)
+
+latex_fun, _ = Task.compile_fun('latex', '${LATEX} ${LATEXFLAGS} ${SRCFILE}', shell=False)
+pdflatex_fun, _ = Task.compile_fun('pdflatex', '${PDFLATEX} ${PDFLATEXFLAGS} ${SRCFILE}', shell=False)
+bibtex_fun, _ = Task.compile_fun('bibtex', '${BIBTEX} ${BIBTEXFLAGS} ${SRCFILE}', shell=False)
+makeindex_fun, _ = Task.compile_fun('makeindex', '${MAKEINDEX} ${MAKEINDEXFLAGS} ${SRCFILE}', shell=False)
+
+g_bibtex_re = re.compile('bibdata', re.M)
+def tex_build(task, command='LATEX'):
+ env = task.env
+ bld = task.generator.bld
+
+ if not env['PROMPT_LATEX']:
+ env.append_value('LATEXFLAGS', '-interaction=batchmode')
+ env.append_value('PDFLATEXFLAGS', '-interaction=batchmode')
+
+ fun = latex_fun
+ if command == 'PDFLATEX':
+ fun = pdflatex_fun
+
+ node = task.inputs[0]
+ reldir = node.bld_dir(env)
+
+ #lst = []
+ #for c in Utils.split_path(reldir):
+ # if c: lst.append('..')
+ #srcfile = os.path.join(*(lst + [node.srcpath(env)]))
+ #sr2 = os.path.join(*(lst + [node.parent.srcpath(env)]))
+ srcfile = node.abspath(env)
+ sr2 = node.parent.abspath() + os.pathsep + node.parent.abspath(env) + os.pathsep
+
+ aux_node = node.change_ext('.aux')
+ idx_node = node.change_ext('.idx')
+
+ nm = aux_node.name
+ docuname = nm[ : len(nm) - 4 ] # 4 is the size of ".aux"
+
+ # important, set the cwd for everybody
+ task.cwd = task.inputs[0].parent.abspath(task.env)
+
+
+ warn('first pass on %s' % command)
+
+ task.env.env = {'TEXINPUTS': sr2}
+ task.env.SRCFILE = srcfile
+ ret = fun(task)
+ if ret:
+ return ret
+
+ # look in the .aux file if there is a bibfile to process
+ try:
+ ct = Utils.readf(aux_node.abspath(env))
+ except (OSError, IOError):
+ error('error bibtex scan')
+ else:
+ fo = g_bibtex_re.findall(ct)
+
+ # there is a .aux file to process
+ if fo:
+ warn('calling bibtex')
+
+ task.env.env = {'BIBINPUTS': sr2, 'BSTINPUTS': sr2}
+ task.env.SRCFILE = docuname
+ ret = bibtex_fun(task)
+ if ret:
+ error('error when calling bibtex %s' % docuname)
+ return ret
+
+ # look on the filesystem if there is a .idx file to process
+ try:
+ idx_path = idx_node.abspath(env)
+ os.stat(idx_path)
+ except OSError:
+ error('error file.idx scan')
+ else:
+ warn('calling makeindex')
+
+ task.env.SRCFILE = idx_node.name
+ task.env.env = {}
+ ret = makeindex_fun(task)
+ if ret:
+ error('error when calling makeindex %s' % idx_path)
+ return ret
+
+
+ hash = ''
+ i = 0
+ while i < 10:
+ # prevent against infinite loops - one never knows
+ i += 1
+
+ # watch the contents of file.aux
+ prev_hash = hash
+ try:
+ hash = Utils.h_file(aux_node.abspath(env))
+ except KeyError:
+ error('could not read the aux file -> %s' % aux_node.abspath(env))
+ pass
+
+ # debug
+ #print "hash is, ", hash, " ", old_hash
+
+ # stop if file.aux does not change anymore
+ if hash and hash == prev_hash:
+ break
+
+ # run the command
+ warn('calling %s' % command)
+
+ task.env.env = {'TEXINPUTS': sr2 + os.pathsep}
+ task.env.SRCFILE = srcfile
+ ret = fun(task)
+ if ret:
+ error('error when calling %s' % command)
+ return ret
+
+ return None # ok
+
+latex_vardeps = ['LATEX', 'LATEXFLAGS']
+def latex_build(task):
+ return tex_build(task, 'LATEX')
+
+pdflatex_vardeps = ['PDFLATEX', 'PDFLATEXFLAGS']
+def pdflatex_build(task):
+ return tex_build(task, 'PDFLATEX')
+
+class tex_taskgen(TaskGen.task_gen):
+ def __init__(self, *k, **kw):
+ TaskGen.task_gen.__init__(self, *k, **kw)
+
+@feature('tex')
+@before('apply_core')
+def apply_tex(self):
+ if not getattr(self, 'type', None) in ['latex', 'pdflatex']:
+ self.type = 'pdflatex'
+
+ tree = self.bld
+ outs = Utils.to_list(getattr(self, 'outs', []))
+
+ # prompt for incomplete files (else the batchmode is used)
+ self.env['PROMPT_LATEX'] = getattr(self, 'prompt', 1)
+
+ deps_lst = []
+
+ if getattr(self, 'deps', None):
+ deps = self.to_list(self.deps)
+ for filename in deps:
+ n = self.path.find_resource(filename)
+ if not n in deps_lst: deps_lst.append(n)
+
+ self.source = self.to_list(self.source)
+ for filename in self.source:
+ base, ext = os.path.splitext(filename)
+
+ node = self.path.find_resource(filename)
+ if not node: raise Utils.WafError('cannot find %s' % filename)
+
+ if self.type == 'latex':
+ task = self.create_task('latex', node, node.change_ext('.dvi'))
+ elif self.type == 'pdflatex':
+ task = self.create_task('pdflatex', node, node.change_ext('.pdf'))
+
+ task.env = self.env
+ task.curdirnode = self.path
+
+ # add the manual dependencies
+ if deps_lst:
+ variant = node.variant(self.env)
+ try:
+ lst = tree.node_deps[task.unique_id()]
+ for n in deps_lst:
+ if not n in lst:
+ lst.append(n)
+ except KeyError:
+ tree.node_deps[task.unique_id()] = deps_lst
+
+ if self.type == 'latex':
+ if 'ps' in outs:
+ tsk = self.create_task('dvips', task.outputs, node.change_ext('.ps'))
+ tsk.env.env = {'TEXINPUTS' : node.parent.abspath() + os.pathsep + self.path.abspath() + os.pathsep + self.path.abspath(self.env)}
+ if 'pdf' in outs:
+ tsk = self.create_task('dvipdf', task.outputs, node.change_ext('.pdf'))
+ tsk.env.env = {'TEXINPUTS' : node.parent.abspath() + os.pathsep + self.path.abspath() + os.pathsep + self.path.abspath(self.env)}
+ elif self.type == 'pdflatex':
+ if 'ps' in outs:
+ self.create_task('pdf2ps', task.outputs, node.change_ext('.ps'))
+ self.source = []
+
+def detect(conf):
+ v = conf.env
+ for p in 'tex latex pdflatex bibtex dvips dvipdf ps2pdf makeindex pdf2ps'.split():
+ conf.find_program(p, var=p.upper())
+ v[p.upper()+'FLAGS'] = ''
+ v['DVIPSFLAGS'] = '-Ppdf'
+
+b = Task.simple_task_type
+b('tex', '${TEX} ${TEXFLAGS} ${SRC}', color='BLUE', shell=False) # not used anywhere
+b('bibtex', '${BIBTEX} ${BIBTEXFLAGS} ${SRC}', color='BLUE', shell=False) # not used anywhere
+b('dvips', '${DVIPS} ${DVIPSFLAGS} ${SRC} -o ${TGT}', color='BLUE', after="latex pdflatex tex bibtex", shell=False)
+b('dvipdf', '${DVIPDF} ${DVIPDFFLAGS} ${SRC} ${TGT}', color='BLUE', after="latex pdflatex tex bibtex", shell=False)
+b('pdf2ps', '${PDF2PS} ${PDF2PSFLAGS} ${SRC} ${TGT}', color='BLUE', after="dvipdf pdflatex", shell=False)
+
+b = Task.task_type_from_func
+cls = b('latex', latex_build, vars=latex_vardeps)
+cls.scan = scan
+cls = b('pdflatex', pdflatex_build, vars=pdflatex_vardeps)
+cls.scan = scan
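
tex_build() above re-runs LaTeX until the hash of the .aux file stops changing,
bounded at 10 passes. The fixpoint loop, sketched with stand-in data in place
of real LaTeX runs:

    import hashlib

    aux_snapshots = [b'refs v1', b'refs v2', b'refs v2']  # hypothetical .aux contents
    prev = cur = ''
    for data in aux_snapshots[:10]:      # bounded, like the original loop
        prev, cur = cur, hashlib.md5(data).hexdigest()
        if prev and cur == prev:
            break                        # .aux stabilised: stop re-running
    print('stable after %r' % data)
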
diff --git a/third_party/waf/wafadmin/Tools/unittestw.py b/third_party/waf/wafadmin/Tools/unittestw.py
new file mode 100644
index 0000000..7cf2ded
--- /dev/null
+++ b/third_party/waf/wafadmin/Tools/unittestw.py
@@ -0,0 +1,308 @@
+#!/usr/bin/env python
+# encoding: utf-8
+# Carlos Rafael Giani, 2006
+
+"""
+Unit tests for C/C++ programs, run from the shutdown() method.
+
+The programs to execute must NOT require any command-line parameters.
+
+In the shutdown method, add the following code:
+
+ >>> def shutdown():
+ ... ut = UnitTest.unit_test()
+ ... ut.run()
+ ... ut.print_results()
+
+
+Each object to use as a unit test must be a program and must have X{obj.unit_test=1}
+"""
+import os, sys
+import Build, TaskGen, Utils, Options, Logs, Task
+from TaskGen import before, after, feature
+from Constants import *
+
+class unit_test(object):
+ "Unit test representation"
+ def __init__(self):
+ self.returncode_ok = 0 # Unit test returncode considered OK. All returncodes differing from this one
+ # will cause the unit test to be marked as "FAILED".
+
+ # The following variables are filled with data by run().
+
+ # print_results() uses these for printing the unit test summary,
+ # but if there is need for direct access to the results,
+ # they can be retrieved here, after calling run().
+
+ self.num_tests_ok = 0 # Number of successful unit tests
+ self.num_tests_failed = 0 # Number of failed unit tests
+ self.num_tests_err = 0 # Tests that have not even run
+ self.total_num_tests = 0 # Total amount of unit tests
+ self.max_label_length = 0 # Maximum label length (pretty-print the output)
+
+ self.unit_tests = Utils.ordered_dict() # Unit test dictionary. Key: the label (unit test filename relative
+ # to the build dir), value: tuple of (absolute filename, source directory)
+ self.unit_test_results = {} # Dictionary containing the unit test results.
+ # Key: the label, value: result (true = success, false = failure)
+ self.unit_test_erroneous = {} # Dictionary indicating erroneous unit tests.
+ # Key: the label, value: true = unit test has an error, false = unit test is ok
+ self.change_to_testfile_dir = False #True if the test file needs to be executed from the same dir
+ self.want_to_see_test_output = False #True to see the stdout from the testfile (for example check suites)
+ self.want_to_see_test_error = False #True to see the stderr from the testfile (for example check suites)
+ self.run_if_waf_does = 'check' #build was the old default
+
+ def run(self):
+ "Run the unit tests and gather results (note: no output here)"
+
+ self.num_tests_ok = 0
+ self.num_tests_failed = 0
+ self.num_tests_err = 0
+ self.total_num_tests = 0
+ self.max_label_length = 0
+
+ self.unit_tests = Utils.ordered_dict()
+ self.unit_test_results = {}
+ self.unit_test_erroneous = {}
+
+ ld_library_path = []
+
+ # If waf is not building, don't run anything
+ if not Options.commands[self.run_if_waf_does]: return
+
+ # Get the paths for the shared libraries, and obtain the unit tests to execute
+ for obj in Build.bld.all_task_gen:
+ try:
+ link_task = obj.link_task
+ except AttributeError:
+ pass
+ else:
+ lib_path = link_task.outputs[0].parent.abspath(obj.env)
+ if lib_path not in ld_library_path:
+ ld_library_path.append(lib_path)
+
+ unit_test = getattr(obj, 'unit_test', '')
+ if unit_test and 'cprogram' in obj.features:
+ try:
+ output = obj.path
+ filename = os.path.join(output.abspath(obj.env), obj.target)
+ srcdir = output.abspath()
+ label = os.path.join(output.bldpath(obj.env), obj.target)
+ self.max_label_length = max(self.max_label_length, len(label))
+ self.unit_tests[label] = (filename, srcdir)
+ except KeyError:
+ pass
+ self.total_num_tests = len(self.unit_tests)
+ # Now run the unit tests
+ Utils.pprint('GREEN', 'Running the unit tests')
+ count = 0
+ result = 1
+
+ for label in self.unit_tests.allkeys:
+ file_and_src = self.unit_tests[label]
+ filename = file_and_src[0]
+ srcdir = file_and_src[1]
+ count += 1
+ line = Build.bld.progress_line(count, self.total_num_tests, Logs.colors.GREEN, Logs.colors.NORMAL)
+ if Options.options.progress_bar and line:
+ sys.stderr.write(line)
+ sys.stderr.flush()
+ try:
+ kwargs = {}
+ kwargs['env'] = os.environ.copy()
+ if self.change_to_testfile_dir:
+ kwargs['cwd'] = srcdir
+ if not self.want_to_see_test_output:
+ kwargs['stdout'] = Utils.pproc.PIPE # PIPE for ignoring output
+ if not self.want_to_see_test_error:
+ kwargs['stderr'] = Utils.pproc.PIPE # PIPE for ignoring output
+ if ld_library_path:
+ v = kwargs['env']
+ def add_path(dct, path, var):
+ dct[var] = os.pathsep.join(Utils.to_list(path) + [os.environ.get(var, '')])
+ if sys.platform == 'win32':
+ add_path(v, ld_library_path, 'PATH')
+ elif sys.platform == 'darwin':
+ add_path(v, ld_library_path, 'DYLD_LIBRARY_PATH')
+ add_path(v, ld_library_path, 'LD_LIBRARY_PATH')
+ else:
+ add_path(v, ld_library_path, 'LD_LIBRARY_PATH')
+
+ pp = Utils.pproc.Popen(filename, **kwargs)
+ (out, err) = pp.communicate() # uh, and the output is ignored?? - fortunately this is going to disappear
+
+ result = int(pp.returncode == self.returncode_ok)
+
+ if result:
+ self.num_tests_ok += 1
+ else:
+ self.num_tests_failed += 1
+
+ self.unit_test_results[label] = result
+ self.unit_test_erroneous[label] = 0
+ except OSError:
+ self.unit_test_erroneous[label] = 1
+ self.num_tests_err += 1
+ except KeyboardInterrupt:
+ pass
+ if Options.options.progress_bar: sys.stdout.write(Logs.colors.cursor_on)
+
+ def print_results(self):
+ "Pretty-prints a summary of all unit tests, along with some statistics"
+
+ # If waf is not building, don't output anything
+ if not Options.commands[self.run_if_waf_does]: return
+
+ p = Utils.pprint
+ # Early quit if no tests were performed
+ if self.total_num_tests == 0:
+ p('YELLOW', 'No unit tests present')
+ return
+
+ for label in self.unit_tests.allkeys:
+ filename = self.unit_tests[label]
+ err = 0
+ result = 0
+
+ try: err = self.unit_test_erroneous[label]
+ except KeyError: pass
+
+ try: result = self.unit_test_results[label]
+ except KeyError: pass
+
+ n = self.max_label_length - len(label)
+ if err: n += 4
+ elif result: n += 7
+ else: n += 3
+
+ line = '%s %s' % (label, '.' * n)
+
+ if err: p('RED', '%sERROR' % line)
+ elif result: p('GREEN', '%sOK' % line)
+ else: p('YELLOW', '%sFAILED' % line)
+
+ percentage_ok = float(self.num_tests_ok) / float(self.total_num_tests) * 100.0
+ percentage_failed = float(self.num_tests_failed) / float(self.total_num_tests) * 100.0
+ percentage_erroneous = float(self.num_tests_err) / float(self.total_num_tests) * 100.0
+
+ p('NORMAL', '''
+Successful tests: %i (%.1f%%)
+Failed tests: %i (%.1f%%)
+Erroneous tests: %i (%.1f%%)
+
+Total number of tests: %i
+''' % (self.num_tests_ok, percentage_ok, self.num_tests_failed, percentage_failed,
+ self.num_tests_err, percentage_erroneous, self.total_num_tests))
+ p('GREEN', 'Unit tests finished')
+
+
+############################################################################################
+
+"""
+New unit test system
+
+The targets with feature 'test' are executed after they are built
+bld(features='cprogram cc test', ...)
+
+To display the results:
+import UnitTest
+bld.add_post_fun(UnitTest.summary)
+"""
+
+import threading
+testlock = threading.Lock()
+
+def set_options(opt):
+ opt.add_option('--alltests', action='store_true', default=True, help='Exec all unit tests', dest='all_tests')
+
+@feature('test')
+@after('apply_link', 'vars_target_cprogram')
+def make_test(self):
+ if not 'cprogram' in self.features:
+ Logs.error('test cannot be executed %s' % self)
+ return
+
+ self.default_install_path = None
+ self.create_task('utest', self.link_task.outputs)
+
+def exec_test(self):
+
+ status = 0
+
+ variant = self.env.variant()
+
+ filename = self.inputs[0].abspath(self.env)
+ self.ut_exec = getattr(self, 'ut_exec', [filename])
+ if getattr(self.generator, 'ut_fun', None):
+ self.generator.ut_fun(self)
+
+ try:
+ fu = getattr(self.generator.bld, 'all_test_paths')
+ except AttributeError:
+ fu = os.environ.copy()
+ self.generator.bld.all_test_paths = fu
+
+ lst = []
+ for obj in self.generator.bld.all_task_gen:
+ link_task = getattr(obj, 'link_task', None)
+ if link_task and link_task.env.variant() == variant:
+ lst.append(link_task.outputs[0].parent.abspath(obj.env))
+
+ def add_path(dct, path, var):
+ dct[var] = os.pathsep.join(Utils.to_list(path) + [os.environ.get(var, '')])
+
+ if sys.platform == 'win32':
+ add_path(fu, lst, 'PATH')
+ elif sys.platform == 'darwin':
+ add_path(fu, lst, 'DYLD_LIBRARY_PATH')
+ add_path(fu, lst, 'LD_LIBRARY_PATH')
+ else:
+ add_path(fu, lst, 'LD_LIBRARY_PATH')
+
+
+ cwd = getattr(self.generator, 'ut_cwd', '') or self.inputs[0].parent.abspath(self.env)
+ proc = Utils.pproc.Popen(self.ut_exec, cwd=cwd, env=fu, stderr=Utils.pproc.PIPE, stdout=Utils.pproc.PIPE)
+ (stdout, stderr) = proc.communicate()
+
+ tup = (filename, proc.returncode, stdout, stderr)
+ self.generator.utest_result = tup
+
+ testlock.acquire()
+ try:
+ bld = self.generator.bld
+ Logs.debug("ut: %r", tup)
+ try:
+ bld.utest_results.append(tup)
+ except AttributeError:
+ bld.utest_results = [tup]
+ finally:
+ testlock.release()
+
+cls = Task.task_type_from_func('utest', func=exec_test, color='PINK', ext_in='.bin')
+
+old = cls.runnable_status
+def test_status(self):
+ ret = old(self)
+ if ret == SKIP_ME and getattr(Options.options, 'all_tests', False):
+ return RUN_ME
+ return ret
+
+cls.runnable_status = test_status
+cls.quiet = 1
+
+def summary(bld):
+ lst = getattr(bld, 'utest_results', [])
+ if lst:
+ Utils.pprint('CYAN', 'execution summary')
+
+ total = len(lst)
+ tfail = len([x for x in lst if x[1]])
+
+ Utils.pprint('CYAN', ' tests that pass %d/%d' % (total-tfail, total))
+ for (f, code, out, err) in lst:
+ if not code:
+ Utils.pprint('CYAN', ' %s' % f)
+
+ Utils.pprint('CYAN', ' tests that fail %d/%d' % (tfail, total))
+ for (f, code, out, err) in lst:
+ if code:
+ Utils.pprint('CYAN', ' %s' % f)
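
Both runners in this file prepend the build's library directories to the
dynamic-loader path before spawning a test binary. The environment handling on
its own, with a hypothetical directory:

    import os

    def add_path(dct, paths, var):
        dct[var] = os.pathsep.join(list(paths) + [os.environ.get(var, '')])

    env = os.environ.copy()
    add_path(env, ['/tmp/build/default/lib'], 'LD_LIBRARY_PATH')
    print(env['LD_LIBRARY_PATH'].split(os.pathsep)[0])
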
diff --git a/third_party/waf/wafadmin/Tools/vala.py b/third_party/waf/wafadmin/Tools/vala.py
new file mode 100644
index 0000000..df1d11b
--- /dev/null
+++ b/third_party/waf/wafadmin/Tools/vala.py
@@ -0,0 +1,307 @@
+#!/usr/bin/env python
+# encoding: utf-8
+# Ali Sabil, 2007
+
+import os.path, shutil
+import Task, Runner, Utils, Logs, Build, Node, Options
+from TaskGen import extension, after, before
+
+EXT_VALA = ['.vala', '.gs']
+
+class valac_task(Task.Task):
+
+ vars = ("VALAC", "VALAC_VERSION", "VALAFLAGS")
+ before = ("cc", "cxx")
+
+ def run(self):
+ env = self.env
+ inputs = [a.srcpath(env) for a in self.inputs]
+ valac = env['VALAC']
+ vala_flags = env.get_flat('VALAFLAGS')
+ top_src = self.generator.bld.srcnode.abspath()
+ top_bld = self.generator.bld.srcnode.abspath(env)
+
+ if env['VALAC_VERSION'] > (0, 1, 6):
+ cmd = [valac, '-C', '--quiet', vala_flags]
+ else:
+ cmd = [valac, '-C', vala_flags]
+
+ if self.threading:
+ cmd.append('--thread')
+
+ if self.profile:
+ cmd.append('--profile=%s' % self.profile)
+
+ if self.target_glib:
+ cmd.append('--target-glib=%s' % self.target_glib)
+
+ features = self.generator.features
+
+ if 'cshlib' in features or 'cstaticlib' in features:
+ output_dir = self.outputs[0].bld_dir(env)
+ cmd.append('--library ' + self.target)
+ if env['VALAC_VERSION'] >= (0, 7, 0):
+ for x in self.outputs:
+ if x.name.endswith('.h'):
+ cmd.append('--header ' + x.bldpath(self.env))
+ cmd.append('--basedir ' + top_src)
+ cmd.append('-d ' + top_bld)
+ if env['VALAC_VERSION'] > (0, 7, 2) and hasattr(self, 'gir'):
+ cmd.append('--gir=%s.gir' % self.gir)
+
+ else:
+ output_dir = self.outputs[0].bld_dir(env)
+ cmd.append('-d %s' % output_dir)
+
+ for vapi_dir in self.vapi_dirs:
+ cmd.append('--vapidir=%s' % vapi_dir)
+
+ for package in self.packages:
+ cmd.append('--pkg %s' % package)
+
+ for package in self.packages_private:
+ cmd.append('--pkg %s' % package)
+
+ cmd.append(" ".join(inputs))
+ result = self.generator.bld.exec_command(" ".join(cmd))
+
+ if not 'cprogram' in features:
+ # generate the .deps file
+ if self.packages:
+ filename = os.path.join(self.generator.path.abspath(env), "%s.deps" % self.target)
+ deps = open(filename, 'w')
+ for package in self.packages:
+ deps.write(package + '\n')
+ deps.close()
+
+ # handle vala 0.1.6, which doesn't honor --directory for the generated .vapi
+ self._fix_output("../%s.vapi" % self.target)
+ # handle vala >= 0.1.7, which has a weird definition for --directory
+ self._fix_output("%s.vapi" % self.target)
+ # handle vala >= 0.2.0, which doesn't honor --directory for the generated .gidl
+ self._fix_output("%s.gidl" % self.target)
+ # handle vala >= 0.3.6, which doesn't honor --directory for the generated .gir
+ self._fix_output("%s.gir" % self.target)
+ if hasattr(self, 'gir'):
+ self._fix_output("%s.gir" % self.gir)
+
+ first = None
+ for node in self.outputs:
+ if not first:
+ first = node
+ else:
+ if first.parent.id != node.parent.id:
+ # issue #483
+ if env['VALAC_VERSION'] < (0, 7, 0):
+ shutil.move(first.parent.abspath(self.env) + os.sep + node.name, node.abspath(self.env))
+ return result
+
+ def install(self):
+ bld = self.generator.bld
+ features = self.generator.features
+
+ if self.attr("install_path") and ("cshlib" in features or "cstaticlib" in features):
+ headers_list = [o for o in self.outputs if o.suffix() == ".h"]
+ vapi_list = [o for o in self.outputs if (o.suffix() in (".vapi", ".deps"))]
+ gir_list = [o for o in self.outputs if o.suffix() == ".gir"]
+
+ for header in headers_list:
+ top_src = self.generator.bld.srcnode
+ package = self.env['PACKAGE']
+ try:
+ api_version = Utils.g_module.API_VERSION
+ except AttributeError:
+ version = Utils.g_module.VERSION.split(".")
+ if version[0] == "0":
+ api_version = "0." + version[1]
+ else:
+ api_version = version[0] + ".0"
+ install_path = '${INCLUDEDIR}/%s-%s/%s' % (package, api_version, header.relpath_gen(top_src))
+ bld.install_as(install_path, header, self.env)
+ bld.install_files('${DATAROOTDIR}/vala/vapi', vapi_list, self.env)
+ bld.install_files('${DATAROOTDIR}/gir-1.0', gir_list, self.env)
+
+ def _fix_output(self, output):
+ top_bld = self.generator.bld.srcnode.abspath(self.env)
+ try:
+ src = os.path.join(top_bld, output)
+ dst = self.generator.path.abspath (self.env)
+ shutil.move(src, dst)
+ except (EnvironmentError, shutil.Error):
+ # the expected output may not exist for this valac version; ignore
+ pass
+
+@extension(EXT_VALA)
+def vala_file(self, node):
+ valatask = getattr(self, "valatask", None)
+ # there is only one vala task and it compiles all vala files .. :-/
+ if not valatask:
+ valatask = self.create_task('valac')
+ self.valatask = valatask
+ self.includes = Utils.to_list(getattr(self, 'includes', []))
+ self.uselib = self.to_list(self.uselib)
+ valatask.packages = []
+ valatask.packages_private = Utils.to_list(getattr(self, 'packages_private', []))
+ valatask.vapi_dirs = []
+ valatask.target = self.target
+ valatask.threading = False
+ valatask.install_path = self.install_path
+ valatask.profile = getattr (self, 'profile', 'gobject')
+ valatask.target_glib = None #Deprecated
+
+ packages = Utils.to_list(getattr(self, 'packages', []))
+ vapi_dirs = Utils.to_list(getattr(self, 'vapi_dirs', []))
+ includes = []
+
+ if hasattr(self, 'uselib_local'):
+ local_packages = Utils.to_list(self.uselib_local)
+ seen = []
+ while len(local_packages) > 0:
+ package = local_packages.pop()
+ if package in seen:
+ continue
+ seen.append(package)
+
+ # check if the package exists
+ package_obj = self.name_to_obj(package)
+ if not package_obj:
+ raise Utils.WafError("object '%s' was not found in uselib_local (required by '%s')" % (package, self.name))
+
+ package_name = package_obj.target
+ package_node = package_obj.path
+ package_dir = package_node.relpath_gen(self.path)
+
+ for task in package_obj.tasks:
+ for output in task.outputs:
+ if output.name == package_name + ".vapi":
+ valatask.set_run_after(task)
+ if package_name not in packages:
+ packages.append(package_name)
+ if package_dir not in vapi_dirs:
+ vapi_dirs.append(package_dir)
+ if package_dir not in includes:
+ includes.append(package_dir)
+
+ if hasattr(package_obj, 'uselib_local'):
+ lst = self.to_list(package_obj.uselib_local)
+ lst.reverse()
+ local_packages = [pkg for pkg in lst if pkg not in seen] + local_packages
+
+ valatask.packages = packages
+ for vapi_dir in vapi_dirs:
+ try:
+ valatask.vapi_dirs.append(self.path.find_dir(vapi_dir).abspath())
+ valatask.vapi_dirs.append(self.path.find_dir(vapi_dir).abspath(self.env))
+ except AttributeError:
+ Logs.warn("Unable to locate Vala API directory: '%s'" % vapi_dir)
+
+ self.includes.append(node.bld.srcnode.abspath())
+ self.includes.append(node.bld.srcnode.abspath(self.env))
+ for include in includes:
+ try:
+ self.includes.append(self.path.find_dir(include).abspath())
+ self.includes.append(self.path.find_dir(include).abspath(self.env))
+ except AttributeError:
+ Logs.warn("Unable to locate include directory: '%s'" % include)
+
+ if valatask.profile == 'gobject':
+ if hasattr(self, 'target_glib'):
+ Logs.warn('target_glib on vala tasks is deprecated; use --vala-target-glib=MAJOR.MINOR from the vala tool options instead')
+
+ if getattr(Options.options, 'vala_target_glib', None):
+ valatask.target_glib = Options.options.vala_target_glib
+
+ if not 'GOBJECT' in self.uselib:
+ self.uselib.append('GOBJECT')
+
+ if hasattr(self, 'threading'):
+ if valatask.profile == 'gobject':
+ valatask.threading = self.threading
+ if not 'GTHREAD' in self.uselib:
+ self.uselib.append('GTHREAD')
+ else:
+ # Vala does not have threading support for the dova and posix profiles
+ Logs.warn("Profile %s does not have threading support" % valatask.profile)
+
+ if hasattr(self, 'gir'):
+ valatask.gir = self.gir
+
+ env = valatask.env
+
+ output_nodes = []
+
+ c_node = node.change_ext('.c')
+ output_nodes.append(c_node)
+ self.allnodes.append(c_node)
+
+ if env['VALAC_VERSION'] < (0, 7, 0):
+ output_nodes.append(node.change_ext('.h'))
+ else:
+ if not 'cprogram' in self.features:
+ output_nodes.append(self.path.find_or_declare('%s.h' % self.target))
+
+ if not 'cprogram' in self.features:
+ output_nodes.append(self.path.find_or_declare('%s.vapi' % self.target))
+ if env['VALAC_VERSION'] > (0, 7, 2):
+ if hasattr(self, 'gir'):
+ output_nodes.append(self.path.find_or_declare('%s.gir' % self.gir))
+ elif env['VALAC_VERSION'] > (0, 3, 5):
+ output_nodes.append(self.path.find_or_declare('%s.gir' % self.target))
+ elif env['VALAC_VERSION'] > (0, 1, 7):
+ output_nodes.append(self.path.find_or_declare('%s.gidl' % self.target))
+ if valatask.packages:
+ output_nodes.append(self.path.find_or_declare('%s.deps' % self.target))
+
+ valatask.inputs.append(node)
+ valatask.outputs.extend(output_nodes)
+
+def detect(conf):
+ min_version = (0, 1, 6)
+ min_version_str = "%d.%d.%d" % min_version
+
+ valac = conf.find_program('valac', var='VALAC', mandatory=True)
+
+ if not conf.env["HAVE_GOBJECT"]:
+ pkg_args = {'package': 'gobject-2.0',
+ 'uselib_store': 'GOBJECT',
+ 'args': '--cflags --libs'}
+ if getattr(Options.options, 'vala_target_glib', None):
+ pkg_args['atleast_version'] = Options.options.vala_target_glib
+
+ conf.check_cfg(**pkg_args)
+
+ if not conf.env["HAVE_GTHREAD"]:
+ pkg_args = {'package': 'gthread-2.0',
+ 'uselib_store': 'GTHREAD',
+ 'args': '--cflags --libs'}
+ if getattr(Options.options, 'vala_target_glib', None):
+ pkg_args['atleast_version'] = Options.options.vala_target_glib
+
+ conf.check_cfg(**pkg_args)
+
+ try:
+ output = Utils.cmd_output(valac + " --version", silent=True)
+ version = output.split(' ', 1)[-1].strip().split(".")[0:3]
+ version = [int(x) for x in version]
+ valac_version = tuple(version)
+ except Exception:
+ valac_version = (0, 0, 0)
+
+ conf.check_message('program version',
+ 'valac >= ' + min_version_str,
+ valac_version >= min_version,
+ "%d.%d.%d" % valac_version)
+
+ conf.check_tool('gnu_dirs')
+
+ if valac_version < min_version:
+ conf.fatal("valac version too old to be used with this tool")
+ return
+
+ conf.env['VALAC_VERSION'] = valac_version
+ conf.env['VALAFLAGS'] = ''
+
+def set_options(opt):
+ valaopts = opt.add_option_group('Vala Compiler Options')
+ valaopts.add_option('--vala-target-glib', default=None,
+ dest='vala_target_glib', metavar='MAJOR.MINOR',
+ help='Target version of glib for Vala GObject code generation')
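+
+# Example (illustrative): with this tool loaded, the target glib version can
+# be chosen at configure time, e.g.:
+#   ./waf configure --vala-target-glib=2.24
+# the version number shown is hypothetical.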
diff --git a/third_party/waf/wafadmin/Tools/winres.py b/third_party/waf/wafadmin/Tools/winres.py
new file mode 100644
index 0000000..6b5aad0
--- /dev/null
+++ b/third_party/waf/wafadmin/Tools/winres.py
@@ -0,0 +1,44 @@
+#!/usr/bin/env python
+# encoding: utf-8
+# Brant Young, 2007
+
+"This hook is called when the class cpp/cc task generator encounters a '.rc' file: X{.rc -> [.res|.rc.o]}"
+
+import os, sys, re
+import TaskGen, Task
+from Utils import quote_whitespace
+from TaskGen import extension
+
+EXT_WINRC = ['.rc']
+
+winrc_str = '${WINRC} ${_CPPDEFFLAGS} ${_CCDEFFLAGS} ${WINRCFLAGS} ${_CPPINCFLAGS} ${_CCINCFLAGS} ${WINRC_TGT_F} ${TGT} ${WINRC_SRC_F} ${SRC}'
+
+@extension(EXT_WINRC)
+def rc_file(self, node):
+ obj_ext = '.rc.o'
+ if self.env['WINRC_TGT_F'] == '/fo': obj_ext = '.res'
+
+ rctask = self.create_task('winrc', node, node.change_ext(obj_ext))
+ self.compiled_tasks.append(rctask)
+
+# create our action, for use with rc file
+Task.simple_task_type('winrc', winrc_str, color='BLUE', before='cc cxx', shell=False)
+
+def detect(conf):
+ v = conf.env
+
+ winrc = v['WINRC']
+ v['WINRC_TGT_F'] = '-o'
+ v['WINRC_SRC_F'] = '-i'
+ # find rc.exe
+ if not winrc:
+ if v['CC_NAME'] in ['gcc', 'cc', 'g++', 'c++']:
+ winrc = conf.find_program('windres', var='WINRC', path_list = v['PATH'])
+ elif v['CC_NAME'] == 'msvc':
+ winrc = conf.find_program('RC', var='WINRC', path_list = v['PATH'])
+ v['WINRC_TGT_F'] = '/fo'
+ v['WINRC_SRC_F'] = ''
+ if not winrc:
+ conf.fatal('winrc was not found!')
+
+ v['WINRCFLAGS'] = ''
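+
+# Example (illustrative): with the values set above, the winrc_str rule
+# typically expands to a command like
+#   windres -o foo.rc.o -i foo.rc        (gcc toolchains)
+#   RC /fo foo.res foo.rc                (msvc; note WINRC_SRC_F is empty)
+# 'foo.rc' is a hypothetical input file.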
diff --git a/third_party/waf/wafadmin/Tools/xlc.py b/third_party/waf/wafadmin/Tools/xlc.py
new file mode 100644
index 0000000..e33b7a1
--- /dev/null
+++ b/third_party/waf/wafadmin/Tools/xlc.py
@@ -0,0 +1,78 @@
+#!/usr/bin/env python
+# encoding: utf-8
+# Thomas Nagy, 2006-2008 (ita)
+# Ralf Habacker, 2006 (rh)
+# Yinon Ehrlich, 2009
+# Michael Kuhn, 2009
+
+import os, sys
+import Configure, Options, Utils
+import ccroot, ar
+from Configure import conftest
+
+@conftest
+def find_xlc(conf):
+ cc = conf.find_program(['xlc_r', 'xlc'], var='CC', mandatory=True)
+ cc = conf.cmd_to_list(cc)
+ conf.env.CC_NAME = 'xlc'
+ conf.env.CC = cc
+
+@conftest
+def find_cpp(conf):
+ v = conf.env
+ cpp = None
+ if v['CPP']: cpp = v['CPP']
+ elif 'CPP' in conf.environ: cpp = conf.environ['CPP']
+ #if not cpp: cpp = v['CC']
+ v['CPP'] = cpp
+
+@conftest
+def xlc_common_flags(conf):
+ v = conf.env
+
+ # CPPFLAGS CCDEFINES _CCINCFLAGS _CCDEFFLAGS
+ v['CCFLAGS_DEBUG'] = ['-g']
+ v['CCFLAGS_RELEASE'] = ['-O2']
+
+ v['CC_SRC_F'] = ''
+ v['CC_TGT_F'] = ['-c', '-o', ''] # shell hack for -MD
+ v['CPPPATH_ST'] = '-I%s' # template for adding include paths
+
+ # linker
+ if not v['LINK_CC']: v['LINK_CC'] = v['CC']
+ v['CCLNK_SRC_F'] = ''
+ v['CCLNK_TGT_F'] = ['-o', ''] # shell hack for -MD
+
+ v['LIB_ST'] = '-l%s' # template for adding libs
+ v['LIBPATH_ST'] = '-L%s' # template for adding libpaths
+ v['STATICLIB_ST'] = '-l%s'
+ v['STATICLIBPATH_ST'] = '-L%s'
+ v['RPATH_ST'] = '-Wl,-rpath,%s'
+ v['CCDEFINES_ST'] = '-D%s'
+
+ v['SONAME_ST'] = ''
+ v['SHLIB_MARKER'] = ''
+ v['STATICLIB_MARKER'] = ''
+ v['FULLSTATIC_MARKER'] = '-static'
+
+ # program
+ v['program_LINKFLAGS'] = ['-Wl,-brtl']
+ v['program_PATTERN'] = '%s'
+
+ # shared library
+ v['shlib_CCFLAGS'] = ['-fPIC', '-DPIC'] # avoid using -DPIC; -fPIC already defines the __PIC__ macro
+ v['shlib_LINKFLAGS'] = ['-G', '-Wl,-brtl,-bexpfull']
+ v['shlib_PATTERN'] = 'lib%s.so'
+
+ # static lib
+ v['staticlib_LINKFLAGS'] = ''
+ v['staticlib_PATTERN'] = 'lib%s.a'
+
+def detect(conf):
+ conf.find_xlc()
+ conf.find_cpp()
+ conf.find_ar()
+ conf.xlc_common_flags()
+ conf.cc_load_tools()
+ conf.cc_add_flags()
+ conf.link_add_flags()
diff --git a/third_party/waf/wafadmin/Tools/xlcxx.py b/third_party/waf/wafadmin/Tools/xlcxx.py
new file mode 100644
index 0000000..6e84662
--- /dev/null
+++ b/third_party/waf/wafadmin/Tools/xlcxx.py
@@ -0,0 +1,78 @@
+#!/usr/bin/env python
+# encoding: utf-8
+# Thomas Nagy, 2006 (ita)
+# Ralf Habacker, 2006 (rh)
+# Yinon Ehrlich, 2009
+# Michael Kuhn, 2009
+
+import os, sys
+import Configure, Options, Utils
+import ccroot, ar
+from Configure import conftest
+
+@conftest
+def find_xlcxx(conf):
+ cxx = conf.find_program(['xlc++_r', 'xlc++'], var='CXX', mandatory=True)
+ cxx = conf.cmd_to_list(cxx)
+ conf.env.CXX_NAME = 'xlc++'
+ conf.env.CXX = cxx
+
+@conftest
+def find_cpp(conf):
+ v = conf.env
+ cpp = None
+ if v['CPP']: cpp = v['CPP']
+ elif 'CPP' in conf.environ: cpp = conf.environ['CPP']
+ #if not cpp: cpp = v['CXX']
+ v['CPP'] = cpp
+
+@conftest
+def xlcxx_common_flags(conf):
+ v = conf.env
+
+ # CPPFLAGS CXXDEFINES _CXXINCFLAGS _CXXDEFFLAGS
+ v['CXXFLAGS_DEBUG'] = ['-g']
+ v['CXXFLAGS_RELEASE'] = ['-O2']
+
+ v['CXX_SRC_F'] = ''
+ v['CXX_TGT_F'] = ['-c', '-o', ''] # shell hack for -MD
+ v['CPPPATH_ST'] = '-I%s' # template for adding include paths
+
+ # linker
+ if not v['LINK_CXX']: v['LINK_CXX'] = v['CXX']
+ v['CXXLNK_SRC_F'] = ''
+ v['CXXLNK_TGT_F'] = ['-o', ''] # shell hack for -MD
+
+ v['LIB_ST'] = '-l%s' # template for adding libs
+ v['LIBPATH_ST'] = '-L%s' # template for adding libpaths
+ v['STATICLIB_ST'] = '-l%s'
+ v['STATICLIBPATH_ST'] = '-L%s'
+ v['RPATH_ST'] = '-Wl,-rpath,%s'
+ v['CXXDEFINES_ST'] = '-D%s'
+
+ v['SONAME_ST'] = ''
+ v['SHLIB_MARKER'] = ''
+ v['STATICLIB_MARKER'] = ''
+ v['FULLSTATIC_MARKER'] = '-static'
+
+ # program
+ v['program_LINKFLAGS'] = ['-Wl,-brtl']
+ v['program_PATTERN'] = '%s'
+
+ # shared library
+ v['shlib_CXXFLAGS'] = ['-fPIC', '-DPIC'] # avoid using -DPIC; -fPIC already defines the __PIC__ macro
+ v['shlib_LINKFLAGS'] = ['-G', '-Wl,-brtl,-bexpfull']
+ v['shlib_PATTERN'] = 'lib%s.so'
+
+ # static lib
+ v['staticlib_LINKFLAGS'] = ''
+ v['staticlib_PATTERN'] = 'lib%s.a'
+
+def detect(conf):
+ conf.find_xlcxx()
+ conf.find_cpp()
+ conf.find_ar()
+ conf.xlcxx_common_flags()
+ conf.cxx_load_tools()
+ conf.cxx_add_flags()
+ conf.link_add_flags()
diff --git a/third_party/waf/wafadmin/Utils.py b/third_party/waf/wafadmin/Utils.py
new file mode 100644
index 0000000..5a59a4c
--- /dev/null
+++ b/third_party/waf/wafadmin/Utils.py
@@ -0,0 +1,725 @@
+#!/usr/bin/env python
+# encoding: utf-8
+# Thomas Nagy, 2005 (ita)
+
+"""
+Utilities, the stable ones are the following:
+
+* h_file: compute a unique value for a file (hash), it uses
+ the module fnv if it is installed (see waf/utils/fnv & http://code.google.com/p/waf/wiki/FAQ)
+ else, md5 (see the python docs)
+
+ For large projects (more than 15000 files) or slow hard disks and filesystems (HFS),
+ it is possible to use a hash based on the file path and size instead (this may give broken cache results)
+ The method h_file MUST raise an OSError if the file is a folder
+
+ import stat
+ def h_file(filename):
+ st = os.lstat(filename)
+ if stat.S_ISDIR(st[stat.ST_MODE]): raise OSError('not a file')
+ m = Utils.md5()
+ m.update(str(st.st_mtime))
+ m.update(str(st.st_size))
+ m.update(filename)
+ return m.digest()
+
+ To replace the function in your project, use something like this:
+ import Utils
+ Utils.h_file = h_file
+
+* h_list
+* h_fun
+* get_term_cols
+* ordered_dict
+
+"""
+
+import os, sys, imp, string, errno, traceback, inspect, re, shutil, datetime, gc
+
+# In python 3.0 we can get rid of all this
+try: from UserDict import UserDict
+except ImportError: from collections import UserDict
+if sys.hexversion >= 0x2060000 or os.name == 'java':
+ import subprocess as pproc
+else:
+ import pproc
+import Logs
+from Constants import *
+
+try:
+ from collections import deque
+except ImportError:
+ class deque(list):
+ def popleft(self):
+ return self.pop(0)
+
+is_win32 = sys.platform == 'win32'
+
+try:
+ # defaultdict in python 2.5
+ from collections import defaultdict as DefaultDict
+except ImportError:
+ class DefaultDict(dict):
+ def __init__(self, default_factory):
+ super(DefaultDict, self).__init__()
+ self.default_factory = default_factory
+ def __getitem__(self, key):
+ try:
+ return super(DefaultDict, self).__getitem__(key)
+ except KeyError:
+ value = self.default_factory()
+ self[key] = value
+ return value
+
+class WafError(Exception):
+ def __init__(self, *args):
+ self.args = args
+ try:
+ self.stack = traceback.extract_stack()
+ except:
+ pass
+ Exception.__init__(self, *args)
+ def __str__(self):
+ return str(len(self.args) == 1 and self.args[0] or self.args)
+
+class WscriptError(WafError):
+ def __init__(self, message, wscript_file=None):
+ if wscript_file:
+ self.wscript_file = wscript_file
+ self.wscript_line = None
+ else:
+ try:
+ (self.wscript_file, self.wscript_line) = self.locate_error()
+ except:
+ (self.wscript_file, self.wscript_line) = (None, None)
+
+ msg_file_line = ''
+ if self.wscript_file:
+ msg_file_line = "%s:" % self.wscript_file
+ if self.wscript_line:
+ msg_file_line += "%s:" % self.wscript_line
+ err_message = "%s error: %s" % (msg_file_line, message)
+ WafError.__init__(self, err_message)
+
+ def locate_error(self):
+ stack = traceback.extract_stack()
+ stack.reverse()
+ for frame in stack:
+ file_name = os.path.basename(frame[0])
+ is_wscript = (file_name == WSCRIPT_FILE or file_name == WSCRIPT_BUILD_FILE)
+ if is_wscript:
+ return (frame[0], frame[1])
+ return (None, None)
+
+indicator = is_win32 and '\x1b[A\x1b[K%s%s%s\r' or '\x1b[K%s%s%s\r'
+
+try:
+ from fnv import new as md5
+ import Constants
+ Constants.SIG_NIL = 'signofnv'
+
+ def h_file(filename):
+ m = md5()
+ try:
+ m.hfile(filename)
+ x = m.digest()
+ if x is None: raise OSError("not a file")
+ return x
+ except SystemError:
+ raise OSError("not a file: " + filename)
+
+except ImportError:
+ try:
+ try:
+ from hashlib import md5
+ except ImportError:
+ from md5 import md5
+
+ def h_file(filename):
+ f = open(filename, 'rb')
+ m = md5()
+ # hash the file in 100k chunks
+ data = f.read(100000)
+ while data:
+ m.update(data)
+ data = f.read(100000)
+ f.close()
+ return m.digest()
+ except ImportError:
+ # portability fixes may be added elsewhere (although, md5 should be everywhere by now)
+ md5 = None
+
+class ordered_dict(UserDict):
+ def __init__(self, dict = None):
+ self.allkeys = []
+ UserDict.__init__(self, dict)
+
+ def __delitem__(self, key):
+ self.allkeys.remove(key)
+ UserDict.__delitem__(self, key)
+
+ def __setitem__(self, key, item):
+ if key not in self.allkeys: self.allkeys.append(key)
+ UserDict.__setitem__(self, key, item)
+
+def exec_command(s, **kw):
+ if 'log' in kw:
+ kw['stdout'] = kw['stderr'] = kw['log']
+ del(kw['log'])
+ kw['shell'] = isinstance(s, str)
+
+ try:
+ proc = pproc.Popen(s, **kw)
+ return proc.wait()
+ except OSError:
+ return -1
+
+if is_win32:
+ def exec_command(s, **kw):
+ if 'log' in kw:
+ kw['stdout'] = kw['stderr'] = kw['log']
+ del(kw['log'])
+ kw['shell'] = isinstance(s, str)
+
+ if len(s) > 2000:
+ startupinfo = pproc.STARTUPINFO()
+ startupinfo.dwFlags |= pproc.STARTF_USESHOWWINDOW
+ kw['startupinfo'] = startupinfo
+
+ try:
+ if 'stdout' not in kw:
+ kw['stdout'] = pproc.PIPE
+ kw['stderr'] = pproc.PIPE
+ kw['universal_newlines'] = True
+ proc = pproc.Popen(s,**kw)
+ (stdout, stderr) = proc.communicate()
+ Logs.info(stdout)
+ if stderr:
+ Logs.error(stderr)
+ return proc.returncode
+ else:
+ proc = pproc.Popen(s,**kw)
+ return proc.wait()
+ except OSError:
+ return -1
+
+listdir = os.listdir
+if is_win32:
+ def listdir_win32(s):
+ if re.match('^[A-Za-z]:$', s):
+ # os.path.isdir fails if s contains only the drive name... (x:)
+ s += os.sep
+ if not os.path.isdir(s):
+ e = OSError()
+ e.errno = errno.ENOENT
+ raise e
+ return os.listdir(s)
+ listdir = listdir_win32
+
+def waf_version(mini = 0x010000, maxi = 0x100000):
+ "Halts if the waf version is wrong"
+ ver = HEXVERSION
+ try: min_val = mini + 0
+ except TypeError: min_val = int(mini.replace('.', '0'), 16)
+
+ if min_val > ver:
+ Logs.error("waf version should be at least %s (%s found)" % (mini, ver))
+ sys.exit(1)
+
+ try: max_val = maxi + 0
+ except TypeError: max_val = int(maxi.replace('.', '0'), 16)
+
+ if max_val < ver:
+ Logs.error("waf version should be at most %s (%s found)" % (maxi, ver))
+ sys.exit(1)
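+
+# Example (illustrative): both call forms are accepted:
+#   waf_version(mini='1.5.0', maxi='1.6.0')
+#   waf_version(mini=0x010500, maxi=0x010600)
+# the string '1.5.0' is converted via int('10500', 16) == 0x10500 before
+# comparison with HEXVERSION.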
+
+def python_24_guard():
+ if sys.hexversion < 0x20400f0 or sys.hexversion >= 0x3000000:
+ raise ImportError("Waf requires Python >= 2.3 but the raw source requires Python 2.4, 2.5 or 2.6")
+
+def ex_stack():
+ exc_type, exc_value, tb = sys.exc_info()
+ if Logs.verbose > 1:
+ exc_lines = traceback.format_exception(exc_type, exc_value, tb)
+ return ''.join(exc_lines)
+ return str(exc_value)
+
+def to_list(sth):
+ if isinstance(sth, str):
+ return sth.split()
+ else:
+ return sth
+
+g_loaded_modules = {}
+"index modules by absolute path"
+
+g_module=None
+"the main module is special"
+
+def load_module(file_path, name=WSCRIPT_FILE):
+ "this function requires an absolute path"
+ try:
+ return g_loaded_modules[file_path]
+ except KeyError:
+ pass
+
+ module = imp.new_module(name)
+
+ try:
+ code = readf(file_path, m='rU')
+ except (IOError, OSError):
+ raise WscriptError('Could not read the file %r' % file_path)
+
+ module.waf_hash_val = code
+
+ dt = os.path.dirname(file_path)
+ sys.path.insert(0, dt)
+ try:
+ exec(compile(code, file_path, 'exec'), module.__dict__)
+ except Exception:
+ exc_type, exc_value, tb = sys.exc_info()
+ raise WscriptError("".join(traceback.format_exception(exc_type, exc_value, tb)), file_path)
+ sys.path.remove(dt)
+
+ g_loaded_modules[file_path] = module
+
+ return module
+
+def set_main_module(file_path):
+ "Load custom options, if defined"
+ global g_module
+ g_module = load_module(file_path, 'wscript_main')
+ g_module.root_path = file_path
+
+ try:
+ g_module.APPNAME
+ except:
+ g_module.APPNAME = 'noname'
+ try:
+ g_module.VERSION
+ except:
+ g_module.VERSION = '1.0'
+
+ # note: to register the module globally, use the following:
+ # sys.modules['wscript_main'] = g_module
+
+def to_hashtable(s):
+ "used for importing env files"
+ tbl = {}
+ lst = s.split('\n')
+ for line in lst:
+ if not line: continue
+ # split on the first '=' only, so values may themselves contain '='
+ mems = line.split('=', 1)
+ tbl[mems[0]] = mems[1]
+ return tbl
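+
+# Example (illustrative):
+#   to_hashtable("CC=gcc\nCFLAGS=-O2 -g\n") -> {'CC': 'gcc', 'CFLAGS': '-O2 -g'}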
+
+def get_term_cols():
+ "console width"
+ return 80
+try:
+ import struct, fcntl, termios
+except ImportError:
+ pass
+else:
+ if Logs.got_tty:
+ def myfun():
+ dummy_lines, cols = struct.unpack("HHHH", \
+ fcntl.ioctl(sys.stderr.fileno(),termios.TIOCGWINSZ , \
+ struct.pack("HHHH", 0, 0, 0, 0)))[:2]
+ return cols
+ # we actually try the function once to see if it is suitable
+ try:
+ myfun()
+ except:
+ pass
+ else:
+ get_term_cols = myfun
+
+rot_idx = 0
+rot_chr = ['\\', '|', '/', '-']
+"the rotation character in the progress bar"
+
+
+def split_path(path):
+ return path.split('/')
+
+def split_path_cygwin(path):
+ if path.startswith('//'):
+ ret = path.split('/')[2:]
+ ret[0] = '/' + ret[0]
+ return ret
+ return path.split('/')
+
+re_sp = re.compile('[/\\\\]')
+def split_path_win32(path):
+ if path.startswith('\\\\'):
+ ret = re.split(re_sp, path)[2:]
+ ret[0] = '\\' + ret[0]
+ return ret
+ return re.split(re_sp, path)
+
+if sys.platform == 'cygwin':
+ split_path = split_path_cygwin
+elif is_win32:
+ split_path = split_path_win32
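+
+# Example (illustrative): on win32 the UNC prefix is preserved:
+#   split_path(r'\\server\share\dir') -> ['\\server', 'share', 'dir']
+# on posix, split_path('/a/b') -> ['', 'a', 'b']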
+
+def copy_attrs(orig, dest, names, only_if_set=False):
+ for a in to_list(names):
+ u = getattr(orig, a, ())
+ if u or not only_if_set:
+ setattr(dest, a, u)
+
+def def_attrs(cls, **kw):
+ '''
+ set attributes for class.
+ @param cls [any class]: the class to update the given attributes in.
+ @param kw [dictionary]: dictionary of attributes names and values.
+
+ if the given class lacks one (or more) of these attributes, add the attribute with its value to the class.
+ '''
+ for k, v in kw.iteritems():
+ if not hasattr(cls, k):
+ setattr(cls, k, v)
+
+def quote_define_name(path):
+ fu = re.compile("[^a-zA-Z0-9]").sub("_", path)
+ fu = fu.upper()
+ return fu
+
+def quote_whitespace(path):
+ return (path.strip().find(' ') > 0 and '"%s"' % path or path).replace('""', '"')
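+
+# Example (illustrative):
+#   quote_whitespace('C:/Program Files/foo') -> '"C:/Program Files/foo"'
+#   quote_whitespace('/usr/bin') -> '/usr/bin'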
+
+def trimquotes(s):
+ if not s: return ''
+ s = s.rstrip()
+ if s[0] == "'" and s[-1] == "'": return s[1:-1]
+ return s
+
+def h_list(lst):
+ m = md5()
+ m.update(str(lst))
+ return m.digest()
+
+def h_fun(fun):
+ try:
+ return fun.code
+ except AttributeError:
+ try:
+ h = inspect.getsource(fun)
+ except IOError:
+ h = "nocode"
+ try:
+ fun.code = h
+ except AttributeError:
+ pass
+ return h
+
+def pprint(col, str, label='', sep='\n'):
+ "print messages in color"
+ sys.stderr.write("%s%s%s %s%s" % (Logs.colors(col), str, Logs.colors.NORMAL, label, sep))
+
+def check_dir(path):
+ """If a folder doesn't exists, create it."""
+ if not os.path.isdir(path):
+ try:
+ os.makedirs(path)
+ except OSError, e:
+ if not os.path.isdir(path):
+ raise WafError('Cannot create the folder %r (%s)' % (path, e))
+
+
+def cmd_output(cmd, **kw):
+
+ silent = False
+ if 'silent' in kw:
+ silent = kw['silent']
+ del(kw['silent'])
+
+ if 'e' in kw:
+ tmp = kw['e']
+ del(kw['e'])
+ kw['env'] = tmp
+
+ kw['shell'] = isinstance(cmd, str)
+ kw['stdout'] = pproc.PIPE
+ if silent:
+ kw['stderr'] = pproc.PIPE
+
+ try:
+ p = pproc.Popen(cmd, **kw)
+ output = p.communicate()[0]
+ except OSError, e:
+ raise ValueError(str(e))
+
+ if p.returncode:
+ if not silent:
+ msg = "command execution failed: %s -> %r" % (cmd, str(output))
+ raise ValueError(msg)
+ output = ''
+ return output
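+
+# Example (illustrative): capture the stdout of a command; a non-zero exit
+# status raises ValueError unless silent=True:
+#   banner = cmd_output(['uname', '-a'], silent=True)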
+
+reg_subst = re.compile(r"(\\\\)|(\$\$)|\$\{([^}]+)\}")
+def subst_vars(expr, params):
+ "substitute ${PREFIX}/bin in /usr/local/bin"
+ def repl_var(m):
+ if m.group(1):
+ return '\\'
+ if m.group(2):
+ return '$'
+ try:
+ # environments may contain lists
+ return params.get_flat(m.group(3))
+ except AttributeError:
+ return params[m.group(3)]
+ return reg_subst.sub(repl_var, expr)
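+
+# Example (illustrative): a plain dict works for params, since get_flat is
+# only used when available:
+#   subst_vars('${PREFIX}/bin', {'PREFIX': '/usr/local'}) -> '/usr/local/bin'
+#   subst_vars('cost: $$5', {}) -> 'cost: $5'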
+
+def unversioned_sys_platform_to_binary_format(unversioned_sys_platform):
+ "infers the binary format from the unversioned_sys_platform name."
+
+ if unversioned_sys_platform in ('linux', 'freebsd', 'netbsd', 'openbsd', 'sunos', 'gnu'):
+ return 'elf'
+ elif unversioned_sys_platform == 'darwin':
+ return 'mac-o' # the Mach-O format; 'mac-o' is the token used by the other tools
+ elif unversioned_sys_platform in ('win32', 'cygwin', 'uwin', 'msys'):
+ return 'pe'
+ # TODO we assume all other operating systems are elf, which is not true.
+ # we may set this to 'unknown' and have ccroot and other tools handle the case "gracefully" (whatever that means).
+ return 'elf'
+
+def unversioned_sys_platform():
+ """returns an unversioned name from sys.platform.
+ sys.platform is not very well defined and depends directly on the python source tree.
+ The version appended to the names is unreliable as it's taken from the build environment at the time python was built,
+ i.e., it's possible to get freebsd7 on a freebsd8 system.
+ So we remove the version from the name, except for special cases where the os has a stupid name like os2 or win32.
+ Some possible values of sys.platform are, amongst others:
+ aix3 aix4 atheos beos5 darwin freebsd2 freebsd3 freebsd4 freebsd5 freebsd6 freebsd7
+ generic gnu0 irix5 irix6 linux2 mac netbsd1 next3 os2emx riscos sunos5 unixware7
+ Investigating the python source tree may reveal more values.
+ """
+ s = sys.platform
+ if s == 'java':
+ # The real OS is hidden under the JVM.
+ from java.lang import System
+ s = System.getProperty('os.name')
+ # see http://lopica.sourceforge.net/os.html for a list of possible values
+ if s == 'Mac OS X':
+ return 'darwin'
+ elif s.startswith('Windows '):
+ return 'win32'
+ elif s == 'OS/2':
+ return 'os2'
+ elif s == 'HP-UX':
+ return 'hpux'
+ elif s in ('SunOS', 'Solaris'):
+ return 'sunos'
+ else: s = s.lower()
+ if s == 'win32' or s.endswith('os2') and s != 'sunos2': return s
+ return re.split('\d+$', s)[0]
+
+#@deprecated('use unversioned_sys_platform instead')
+def detect_platform():
+ """this function has been in the Utils module for some time.
+ It's hard to guess what people have used it for.
+ It seems its goal is to return an unversioned sys.platform, but it's not handling all platforms.
+ For example, the version is not removed on freebsd and netbsd, amongst others.
+ """
+ s = sys.platform
+
+ # known POSIX
+ for x in 'cygwin linux irix sunos hpux aix darwin gnu'.split():
+ # sys.platform may be linux2
+ if s.find(x) >= 0:
+ return x
+
+ # unknown POSIX
+ if os.name in 'posix java os2'.split():
+ return os.name
+
+ return s
+
+def load_tool(tool, tooldir=None):
+ '''
+ load_tool: import a Python module, optionally using several directories.
+ @param tool [string]: name of tool to import.
+ @param tooldir [list]: directories to look for the tool.
+ @return: the loaded module.
+
+ Warning: this function is not thread-safe: it modifies sys.path,
+ so calls must run in sequence.
+ '''
+ if tooldir:
+ assert isinstance(tooldir, list)
+ sys.path = tooldir + sys.path
+ else:
+ tooldir = []
+ try:
+ return __import__(tool)
+ finally:
+ for dt in tooldir:
+ sys.path.remove(dt)
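+
+# Example (illustrative; 'gcc' is a tool shipped in Tools/, while 'mytool'
+# and './waftools' are hypothetical names):
+#   mod = load_tool('gcc')
+#   mod2 = load_tool('mytool', tooldir=['./waftools'])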
+
+def readf(fname, m='r'):
+ "get the contents of a file, it is not used anywhere for the moment"
+ f = open(fname, m)
+ try:
+ txt = f.read()
+ finally:
+ f.close()
+ return txt
+
+def nada(*k, **kw):
+ """A function that does nothing"""
+ pass
+
+def diff_path(top, subdir):
+ """difference between two absolute paths"""
+ top = os.path.normpath(top).replace('\\', '/').split('/')
+ subdir = os.path.normpath(subdir).replace('\\', '/').split('/')
+ if len(top) == len(subdir): return ''
+ diff = subdir[len(top) - len(subdir):]
+ return os.path.join(*diff)
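+
+# Example (illustrative):
+#   diff_path('/a/b', '/a/b/c/d') -> 'c/d'
+#   diff_path('/a/b', '/a/b') -> ''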
+
+class Context(object):
+ """A base class for commands to be executed from Waf scripts"""
+
+ def set_curdir(self, dir):
+ self.curdir_ = dir
+
+ def get_curdir(self):
+ try:
+ return self.curdir_
+ except AttributeError:
+ self.curdir_ = os.getcwd()
+ return self.get_curdir()
+
+ curdir = property(get_curdir, set_curdir)
+
+ def recurse(self, dirs, name=''):
+ """The function for calling scripts from folders, it tries to call wscript + function_name
+ and if that file does not exist, it will call the method 'function_name' from a file named wscript
+ the dirs can be a list of folders or a string containing space-separated folder paths
+ """
+ if not name:
+ name = inspect.stack()[1][3]
+
+ if isinstance(dirs, str):
+ dirs = to_list(dirs)
+
+ for x in dirs:
+ if os.path.isabs(x):
+ nexdir = x
+ else:
+ nexdir = os.path.join(self.curdir, x)
+
+ base = os.path.join(nexdir, WSCRIPT_FILE)
+ file_path = base + '_' + name
+
+ try:
+ txt = readf(file_path, m='rU')
+ except (OSError, IOError):
+ try:
+ module = load_module(base)
+ except OSError:
+ raise WscriptError('No such script %s' % base)
+
+ try:
+ f = module.__dict__[name]
+ except KeyError:
+ raise WscriptError('No function %s defined in %s' % (name, base))
+
+ if getattr(self.__class__, 'pre_recurse', None):
+ self.pre_recurse(f, base, nexdir)
+ old = self.curdir
+ self.curdir = nexdir
+ try:
+ f(self)
+ finally:
+ self.curdir = old
+ if getattr(self.__class__, 'post_recurse', None):
+ self.post_recurse(module, base, nexdir)
+ else:
+ dc = {'ctx': self}
+ if getattr(self.__class__, 'pre_recurse', None):
+ dc = self.pre_recurse(txt, file_path, nexdir)
+ old = self.curdir
+ self.curdir = nexdir
+ try:
+ try:
+ exec(compile(txt, file_path, 'exec'), dc)
+ except Exception:
+ exc_type, exc_value, tb = sys.exc_info()
+ raise WscriptError("".join(traceback.format_exception(exc_type, exc_value, tb)), base)
+ finally:
+ self.curdir = old
+ if getattr(self.__class__, 'post_recurse', None):
+ self.post_recurse(txt, file_path, nexdir)
+
+if is_win32:
+ old = shutil.copy2
+ def copy2(src, dst):
+ old(src, dst)
+ shutil.copystat(src, dst) # copy the stat onto the destination, not back onto the source
+ setattr(shutil, 'copy2', copy2)
+
+def zip_folder(dir, zip_file_name, prefix):
+ """
+ prefix is the path prefix prepended to every entry inside the archive
+ """
+ import zipfile
+ zip = zipfile.ZipFile(zip_file_name, 'w', compression=zipfile.ZIP_DEFLATED)
+ base = os.path.abspath(dir)
+
+ if prefix:
+ if prefix[-1] != os.sep:
+ prefix += os.sep
+
+ n = len(base)
+ for root, dirs, files in os.walk(base):
+ for f in files:
+ archive_name = prefix + root[n:] + os.sep + f
+ zip.write(root + os.sep + f, archive_name, zipfile.ZIP_DEFLATED)
+ zip.close()
+
+def get_elapsed_time(start):
+ "Format a time delta (datetime.timedelta) using the format DdHhMmS.MSs"
+ delta = datetime.datetime.now() - start
+ # cast to int necessary for python 3.0
+ days = int(delta.days)
+ hours = int(delta.seconds / 3600)
+ minutes = int((delta.seconds - hours * 3600) / 60)
+ seconds = delta.seconds - hours * 3600 - minutes * 60 \
+ + float(delta.microseconds) / 1000 / 1000
+ result = ''
+ if days:
+ result += '%dd' % days
+ if days or hours:
+ result += '%dh' % hours
+ if days or hours or minutes:
+ result += '%dm' % minutes
+ return '%s%.3fs' % (result, seconds)
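+
+# Example (illustrative): a delta of 1 hour, 2 minutes and 3.5 seconds is
+# rendered as '1h2m3.500s'; the leading day/hour/minute fields are omitted
+# while they (and all higher fields) are zero.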
+
+if os.name == 'java':
+ # For Jython (they should really fix the inconsistency)
+ try:
+ gc.disable()
+ gc.enable()
+ except NotImplementedError:
+ gc.disable = gc.enable
+
+def run_once(fun):
+ """
+ decorator, make a function cache its results, use like this:
+
+ @run_once
+ def foo(k):
+ return 345*2343
+ """
+ cache = {}
+ def wrap(k):
+ try:
+ return cache[k]
+ except KeyError:
+ ret = fun(k)
+ cache[k] = ret
+ return ret
+ wrap.__cache__ = cache
+ return wrap
diff --git a/third_party/waf/wafadmin/__init__.py b/third_party/waf/wafadmin/__init__.py
new file mode 100644
index 0000000..01273cf
--- /dev/null
+++ b/third_party/waf/wafadmin/__init__.py
@@ -0,0 +1,3 @@
+#!/usr/bin/env python
+# encoding: utf-8
+# Thomas Nagy, 2005 (ita)
diff --git a/third_party/waf/wafadmin/ansiterm.py b/third_party/waf/wafadmin/ansiterm.py
new file mode 100644
index 0000000..2ec0b4c
--- /dev/null
+++ b/third_party/waf/wafadmin/ansiterm.py
@@ -0,0 +1,235 @@
+import sys, os
+try:
+ if (not sys.stderr.isatty()) or (not sys.stdout.isatty()):
+ raise ValueError('not a tty')
+
+ from ctypes import *
+
+ class COORD(Structure):
+ _fields_ = [("X", c_short), ("Y", c_short)]
+
+ class SMALL_RECT(Structure):
+ _fields_ = [("Left", c_short), ("Top", c_short), ("Right", c_short), ("Bottom", c_short)]
+
+ class CONSOLE_SCREEN_BUFFER_INFO(Structure):
+ _fields_ = [("Size", COORD), ("CursorPosition", COORD), ("Attributes", c_short), ("Window", SMALL_RECT), ("MaximumWindowSize", COORD)]
+
+ class CONSOLE_CURSOR_INFO(Structure):
+ _fields_ = [('dwSize',c_ulong), ('bVisible', c_int)]
+
+ sbinfo = CONSOLE_SCREEN_BUFFER_INFO()
+ csinfo = CONSOLE_CURSOR_INFO()
+ hconsole = windll.kernel32.GetStdHandle(-11)
+ windll.kernel32.GetConsoleScreenBufferInfo(hconsole, byref(sbinfo))
+ if sbinfo.Size.X < 10 or sbinfo.Size.Y < 10: raise Exception('small console')
+ windll.kernel32.GetConsoleCursorInfo(hconsole, byref(csinfo))
+except Exception:
+ pass
+else:
+ import re, threading
+
+ to_int = lambda number, default: number and int(number) or default
+ wlock = threading.Lock()
+
+ STD_OUTPUT_HANDLE = -11
+ STD_ERROR_HANDLE = -12
+
+ class AnsiTerm(object):
+ def __init__(self):
+ self.hconsole = windll.kernel32.GetStdHandle(STD_OUTPUT_HANDLE)
+ self.cursor_history = []
+ self.orig_sbinfo = CONSOLE_SCREEN_BUFFER_INFO()
+ self.orig_csinfo = CONSOLE_CURSOR_INFO()
+ windll.kernel32.GetConsoleScreenBufferInfo(self.hconsole, byref(self.orig_sbinfo))
+ windll.kernel32.GetConsoleCursorInfo(hconsole, byref(self.orig_csinfo))
+
+
+ def screen_buffer_info(self):
+ sbinfo = CONSOLE_SCREEN_BUFFER_INFO()
+ windll.kernel32.GetConsoleScreenBufferInfo(self.hconsole, byref(sbinfo))
+ return sbinfo
+
+ def clear_line(self, param):
+ mode = param and int(param) or 0
+ sbinfo = self.screen_buffer_info()
+ if mode == 1: # Clear from beginning of line to cursor position
+ line_start = COORD(0, sbinfo.CursorPosition.Y)
+ line_length = sbinfo.CursorPosition.X + 1
+ elif mode == 2: # Clear entire line
+ line_start = COORD(0, sbinfo.CursorPosition.Y)
+ line_length = sbinfo.Size.X
+ else: # Clear from cursor position to end of line
+ line_start = sbinfo.CursorPosition
+ line_length = sbinfo.Size.X - sbinfo.CursorPosition.X
+ chars_written = c_int()
+ windll.kernel32.FillConsoleOutputCharacterA(self.hconsole, c_char(' '), line_length, line_start, byref(chars_written))
+ windll.kernel32.FillConsoleOutputAttribute(self.hconsole, sbinfo.Attributes, line_length, line_start, byref(chars_written))
+
+ def clear_screen(self, param):
+ mode = to_int(param, 0)
+ sbinfo = self.screen_buffer_info()
+ if mode == 1: # Clear from beginning of screen to cursor position
+ clear_start = COORD(0, 0)
+ clear_length = sbinfo.Size.X * sbinfo.CursorPosition.Y + sbinfo.CursorPosition.X
+ elif mode == 2: # Clear entire screen and return cursor to home
+ clear_start = COORD(0, 0)
+ clear_length = sbinfo.Size.X * sbinfo.Size.Y
+ windll.kernel32.SetConsoleCursorPosition(self.hconsole, clear_start)
+ else: # Clear from cursor position to end of screen
+ clear_start = sbinfo.CursorPosition
+ clear_length = ((sbinfo.Size.X - sbinfo.CursorPosition.X) + sbinfo.Size.X * (sbinfo.Size.Y - sbinfo.CursorPosition.Y))
+ chars_written = c_int()
+ windll.kernel32.FillConsoleOutputCharacterA(self.hconsole, c_char(' '), clear_length, clear_start, byref(chars_written))
+ windll.kernel32.FillConsoleOutputAttribute(self.hconsole, sbinfo.Attributes, clear_length, clear_start, byref(chars_written))
+
+ def push_cursor(self, param):
+ sbinfo = self.screen_buffer_info()
+ self.cursor_history.append(sbinfo.CursorPosition) # lists have no push(); append is intended
+
+ def pop_cursor(self, param):
+ if self.cursor_history:
+ old_pos = self.cursor_history.pop()
+ windll.kernel32.SetConsoleCursorPosition(self.hconsole, old_pos)
+
+ def set_cursor(self, param):
+ x, sep, y = param.partition(';')
+ x = to_int(x, 1) - 1
+ y = to_int(y, 1) - 1
+ sbinfo = self.screen_buffer_info()
+ new_pos = COORD(
+ min(max(0, x), sbinfo.Size.X),
+ min(max(0, y), sbinfo.Size.Y)
+ )
+ windll.kernel32.SetConsoleCursorPosition(self.hconsole, new_pos)
+
+ def set_column(self, param):
+ x = to_int(param, 1) - 1
+ sbinfo = self.screen_buffer_info()
+ new_pos = COORD(
+ min(max(0, x), sbinfo.Size.X),
+ sbinfo.CursorPosition.Y
+ )
+ windll.kernel32.SetConsoleCursorPosition(self.hconsole, new_pos)
+
+ def move_cursor(self, x_offset=0, y_offset=0):
+ sbinfo = self.screen_buffer_info()
+ new_pos = COORD(
+ min(max(0, sbinfo.CursorPosition.X + x_offset), sbinfo.Size.X),
+ min(max(0, sbinfo.CursorPosition.Y + y_offset), sbinfo.Size.Y)
+ )
+ windll.kernel32.SetConsoleCursorPosition(self.hconsole, new_pos)
+
+ def move_up(self, param):
+ self.move_cursor(y_offset = -to_int(param, 1))
+
+ def move_down(self, param):
+ self.move_cursor(y_offset = to_int(param, 1))
+
+ def move_left(self, param):
+ self.move_cursor(x_offset = -to_int(param, 1))
+
+ def move_right(self, param):
+ self.move_cursor(x_offset = to_int(param, 1))
+
+ def next_line(self, param):
+ sbinfo = self.screen_buffer_info()
+ self.move_cursor(
+ x_offset = -sbinfo.CursorPosition.X,
+ y_offset = to_int(param, 1)
+ )
+
+ def prev_line(self, param):
+ sbinfo = self.screen_buffer_info()
+ self.move_cursor(
+ x_offset = -sbinfo.CursorPosition.X,
+ y_offset = -to_int(param, 1)
+ )
+
+ escape_to_color = { (0, 30): 0x0, #black
+ (0, 31): 0x4, #red
+ (0, 32): 0x2, #green
+ (0, 33): 0x4+0x2, #dark yellow
+ (0, 34): 0x1, #blue
+ (0, 35): 0x1+0x4, #purple
+ (0, 36): 0x2+0x4, #cyan
+ (0, 37): 0x1+0x2+0x4, #grey
+ (1, 30): 0x1+0x2+0x4, #dark gray
+ (1, 31): 0x4+0x8, #red
+ (1, 32): 0x2+0x8, #light green
+ (1, 33): 0x4+0x2+0x8, #yellow
+ (1, 34): 0x1+0x8, #light blue
+ (1, 35): 0x1+0x4+0x8, #light purple
+ (1, 36): 0x1+0x2+0x8, #light cyan
+ (1, 37): 0x1+0x2+0x4+0x8, #white
+ }
+
+ def set_color(self, param):
+ cols = param.split(';')
+ attr = self.orig_sbinfo.Attributes
+ for c in cols:
+ c = to_int(c, 0)
+ if c in range(30,38):
+ attr = (attr & 0xf0) | (self.escape_to_color.get((0,c), 0x7))
+ elif c in range(40,48):
+ attr = (attr & 0x0f) | (self.escape_to_color.get((0,c), 0x7) << 8)
+ elif c in range(90,98):
+ attr = (attr & 0xf0) | (self.escape_to_color.get((1,c-60), 0x7))
+ elif c in range(100,108):
+ attr = (attr & 0x0f) | (self.escape_to_color.get((1,c-60), 0x7) << 8)
+ elif c == 1:
+ attr |= 0x08
+ windll.kernel32.SetConsoleTextAttribute(self.hconsole, attr)
+
+ def show_cursor(self,param):
+ csinfo.bVisible = 1
+ windll.kernel32.SetConsoleCursorInfo(self.hconsole, byref(csinfo))
+
+ def hide_cursor(self,param):
+ csinfo.bVisible = 0
+ windll.kernel32.SetConsoleCursorInfo(self.hconsole, byref(csinfo))
+
+ ansi_command_table = {
+ 'A': move_up,
+ 'B': move_down,
+ 'C': move_right,
+ 'D': move_left,
+ 'E': next_line,
+ 'F': prev_line,
+ 'G': set_column,
+ 'H': set_cursor,
+ 'f': set_cursor,
+ 'J': clear_screen,
+ 'K': clear_line,
+ 'h': show_cursor,
+ 'l': hide_cursor,
+ 'm': set_color,
+ 's': push_cursor,
+ 'u': pop_cursor,
+ }
+ # Match either the escape sequence or text not containing escape sequence
+ ansi_tokens = re.compile('(?:\x1b\[([0-9?;]*)([a-zA-Z])|([^\x1b]+))')
+ def write(self, text):
+ try:
+ wlock.acquire()
+ for param, cmd, txt in self.ansi_tokens.findall(text):
+ if cmd:
+ cmd_func = self.ansi_command_table.get(cmd)
+ if cmd_func:
+ cmd_func(self, param)
+ else:
+ chars_written = c_int()
+ if isinstance(txt, unicode):
+ windll.kernel32.WriteConsoleW(self.hconsole, txt, len(txt), byref(chars_written), None)
+ else:
+ windll.kernel32.WriteConsoleA(self.hconsole, txt, len(txt), byref(chars_written), None)
+ finally:
+ wlock.release()
+
+ def flush(self):
+ pass
+
+ def isatty(self):
+ return True
+
+ sys.stderr = sys.stdout = AnsiTerm()
+ os.environ['TERM'] = 'vt100'
diff --git a/third_party/waf/wafadmin/pproc.py b/third_party/waf/wafadmin/pproc.py
new file mode 100644
index 0000000..44b9dd2
--- /dev/null
+++ b/third_party/waf/wafadmin/pproc.py
@@ -0,0 +1,619 @@
+# borrowed from python 2.5.2c1
+# Copyright (c) 2003-2005 by Peter Astrand <astrand@lysator.liu.se>
+# Licensed to PSF under a Contributor Agreement.
+
+import sys
+mswindows = (sys.platform == "win32")
+
+import os
+import types
+import traceback
+import gc
+
+class CalledProcessError(Exception):
+ def __init__(self, returncode, cmd):
+ self.returncode = returncode
+ self.cmd = cmd
+ def __str__(self):
+ return "Command '%s' returned non-zero exit status %d" % (self.cmd, self.returncode)
+
+if mswindows:
+ import threading
+ import msvcrt
+ if 0:
+ import pywintypes
+ from win32api import GetStdHandle, STD_INPUT_HANDLE, \
+ STD_OUTPUT_HANDLE, STD_ERROR_HANDLE
+ from win32api import GetCurrentProcess, DuplicateHandle, \
+ GetModuleFileName, GetVersion
+ from win32con import DUPLICATE_SAME_ACCESS, SW_HIDE
+ from win32pipe import CreatePipe
+ from win32process import CreateProcess, STARTUPINFO, \
+ GetExitCodeProcess, STARTF_USESTDHANDLES, \
+ STARTF_USESHOWWINDOW, CREATE_NEW_CONSOLE
+ from win32event import WaitForSingleObject, INFINITE, WAIT_OBJECT_0
+ else:
+ from _subprocess import *
+ class STARTUPINFO:
+ dwFlags = 0
+ hStdInput = None
+ hStdOutput = None
+ hStdError = None
+ wShowWindow = 0
+ class pywintypes:
+ error = IOError
+else:
+ import select
+ import errno
+ import fcntl
+ import pickle
+
+__all__ = ["Popen", "PIPE", "STDOUT", "call", "check_call", "CalledProcessError"]
+
+try:
+ MAXFD = os.sysconf("SC_OPEN_MAX")
+except:
+ MAXFD = 256
+
+try:
+ False
+except NameError:
+ False = 0
+ True = 1
+
+_active = []
+
+def _cleanup():
+ for inst in _active[:]:
+ if inst.poll(_deadstate=sys.maxint) >= 0:
+ try:
+ _active.remove(inst)
+ except ValueError:
+ pass
+
+PIPE = -1
+STDOUT = -2
+
+
+def call(*popenargs, **kwargs):
+ return Popen(*popenargs, **kwargs).wait()
+
+def check_call(*popenargs, **kwargs):
+ retcode = call(*popenargs, **kwargs)
+ cmd = kwargs.get("args")
+ if cmd is None:
+ cmd = popenargs[0]
+ if retcode:
+ raise CalledProcessError(retcode, cmd)
+ return retcode
+
+
+def list2cmdline(seq):
+ result = []
+ needquote = False
+ for arg in seq:
+ bs_buf = []
+
+ if result:
+ result.append(' ')
+
+ needquote = (" " in arg) or ("\t" in arg) or arg == ""
+ if needquote:
+ result.append('"')
+
+ for c in arg:
+ if c == '\\':
+ bs_buf.append(c)
+ elif c == '"':
+ result.append('\\' * len(bs_buf)*2)
+ bs_buf = []
+ result.append('\\"')
+ else:
+ if bs_buf:
+ result.extend(bs_buf)
+ bs_buf = []
+ result.append(c)
+
+ if bs_buf:
+ result.extend(bs_buf)
+
+ if needquote:
+ result.extend(bs_buf)
+ result.append('"')
+
+ return ''.join(result)
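+
+# Example (illustrative), following the MS C runtime quoting rules:
+#   list2cmdline(['echo', 'hello world', 'a"b'])  ->  echo "hello world" a\"b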
+
+class Popen(object):
+ def __init__(self, args, bufsize=0, executable=None,
+ stdin=None, stdout=None, stderr=None,
+ preexec_fn=None, close_fds=False, shell=False,
+ cwd=None, env=None, universal_newlines=False,
+ startupinfo=None, creationflags=0):
+ _cleanup()
+
+ self._child_created = False
+ if not isinstance(bufsize, (int, long)):
+ raise TypeError("bufsize must be an integer")
+
+ if mswindows:
+ if preexec_fn is not None:
+ raise ValueError("preexec_fn is not supported on Windows platforms")
+ if close_fds:
+ raise ValueError("close_fds is not supported on Windows platforms")
+ else:
+ if startupinfo is not None:
+ raise ValueError("startupinfo is only supported on Windows platforms")
+ if creationflags != 0:
+ raise ValueError("creationflags is only supported on Windows platforms")
+
+ self.stdin = None
+ self.stdout = None
+ self.stderr = None
+ self.pid = None
+ self.returncode = None
+ self.universal_newlines = universal_newlines
+
+ (p2cread, p2cwrite,
+ c2pread, c2pwrite,
+ errread, errwrite) = self._get_handles(stdin, stdout, stderr)
+
+ self._execute_child(args, executable, preexec_fn, close_fds,
+ cwd, env, universal_newlines,
+ startupinfo, creationflags, shell,
+ p2cread, p2cwrite,
+ c2pread, c2pwrite,
+ errread, errwrite)
+
+ if mswindows:
+ if stdin is None and p2cwrite is not None:
+ os.close(p2cwrite)
+ p2cwrite = None
+ if stdout is None and c2pread is not None:
+ os.close(c2pread)
+ c2pread = None
+ if stderr is None and errread is not None:
+ os.close(errread)
+ errread = None
+
+ if p2cwrite:
+ self.stdin = os.fdopen(p2cwrite, 'wb', bufsize)
+ if c2pread:
+ if universal_newlines:
+ self.stdout = os.fdopen(c2pread, 'rU', bufsize)
+ else:
+ self.stdout = os.fdopen(c2pread, 'rb', bufsize)
+ if errread:
+ if universal_newlines:
+ self.stderr = os.fdopen(errread, 'rU', bufsize)
+ else:
+ self.stderr = os.fdopen(errread, 'rb', bufsize)
+
+
+ def _translate_newlines(self, data):
+ data = data.replace("\r\n", "\n")
+ data = data.replace("\r", "\n")
+ return data
+
+
+ def __del__(self, sys=sys):
+ if not self._child_created:
+ return
+ self.poll(_deadstate=sys.maxint)
+ if self.returncode is None and _active is not None:
+ _active.append(self)
+
+
+ def communicate(self, input=None):
+ if [self.stdin, self.stdout, self.stderr].count(None) >= 2:
+ stdout = None
+ stderr = None
+ if self.stdin:
+ if input:
+ self.stdin.write(input)
+ self.stdin.close()
+ elif self.stdout:
+ stdout = self.stdout.read()
+ elif self.stderr:
+ stderr = self.stderr.read()
+ self.wait()
+ return (stdout, stderr)
+
+ return self._communicate(input)
+
+
+ if mswindows:
+ def _get_handles(self, stdin, stdout, stderr):
+ if stdin is None and stdout is None and stderr is None:
+ return (None, None, None, None, None, None)
+
+ p2cread, p2cwrite = None, None
+ c2pread, c2pwrite = None, None
+ errread, errwrite = None, None
+
+ if stdin is None:
+ p2cread = GetStdHandle(STD_INPUT_HANDLE)
+ if p2cread is not None:
+ pass
+ elif stdin is None or stdin == PIPE:
+ p2cread, p2cwrite = CreatePipe(None, 0)
+ p2cwrite = p2cwrite.Detach()
+ p2cwrite = msvcrt.open_osfhandle(p2cwrite, 0)
+ elif isinstance(stdin, int):
+ p2cread = msvcrt.get_osfhandle(stdin)
+ else:
+ p2cread = msvcrt.get_osfhandle(stdin.fileno())
+ p2cread = self._make_inheritable(p2cread)
+
+ if stdout is None:
+ c2pwrite = GetStdHandle(STD_OUTPUT_HANDLE)
+ if c2pwrite is not None:
+ pass
+ elif stdout is None or stdout == PIPE:
+ c2pread, c2pwrite = CreatePipe(None, 0)
+ c2pread = c2pread.Detach()
+ c2pread = msvcrt.open_osfhandle(c2pread, 0)
+ elif isinstance(stdout, int):
+ c2pwrite = msvcrt.get_osfhandle(stdout)
+ else:
+ c2pwrite = msvcrt.get_osfhandle(stdout.fileno())
+ c2pwrite = self._make_inheritable(c2pwrite)
+
+ if stderr is None:
+ errwrite = GetStdHandle(STD_ERROR_HANDLE)
+ if errwrite is not None:
+ pass
+ elif stderr is None or stderr == PIPE:
+ errread, errwrite = CreatePipe(None, 0)
+ errread = errread.Detach()
+ errread = msvcrt.open_osfhandle(errread, 0)
+ elif stderr == STDOUT:
+ errwrite = c2pwrite
+ elif isinstance(stderr, int):
+ errwrite = msvcrt.get_osfhandle(stderr)
+ else:
+ errwrite = msvcrt.get_osfhandle(stderr.fileno())
+ errwrite = self._make_inheritable(errwrite)
+
+ return (p2cread, p2cwrite,
+ c2pread, c2pwrite,
+ errread, errwrite)
+ def _make_inheritable(self, handle):
+ return DuplicateHandle(GetCurrentProcess(), handle, GetCurrentProcess(), 0, 1, DUPLICATE_SAME_ACCESS)
+
+ def _find_w9xpopen(self):
+ w9xpopen = os.path.join(os.path.dirname(GetModuleFileName(0)), "w9xpopen.exe")
+ if not os.path.exists(w9xpopen):
+ w9xpopen = os.path.join(os.path.dirname(sys.exec_prefix), "w9xpopen.exe")
+ if not os.path.exists(w9xpopen):
+ raise RuntimeError("Cannot locate w9xpopen.exe, which is needed for Popen to work with your shell or platform.")
+ return w9xpopen
+
+ def _execute_child(self, args, executable, preexec_fn, close_fds,
+ cwd, env, universal_newlines,
+ startupinfo, creationflags, shell,
+ p2cread, p2cwrite,
+ c2pread, c2pwrite,
+ errread, errwrite):
+
+ if not isinstance(args, types.StringTypes):
+ args = list2cmdline(args)
+
+ if startupinfo is None:
+ startupinfo = STARTUPINFO()
+ if None not in (p2cread, c2pwrite, errwrite):
+ startupinfo.dwFlags |= STARTF_USESTDHANDLES
+ startupinfo.hStdInput = p2cread
+ startupinfo.hStdOutput = c2pwrite
+ startupinfo.hStdError = errwrite
+
+ if shell:
+ startupinfo.dwFlags |= STARTF_USESHOWWINDOW
+ startupinfo.wShowWindow = SW_HIDE
+ comspec = os.environ.get("COMSPEC", "cmd.exe")
+ args = comspec + " /c " + args
+ if (GetVersion() >= 0x80000000L or
+ os.path.basename(comspec).lower() == "command.com"):
+ w9xpopen = self._find_w9xpopen()
+ args = '"%s" %s' % (w9xpopen, args)
+ creationflags |= CREATE_NEW_CONSOLE
+
+ try:
+ hp, ht, pid, tid = CreateProcess(executable, args, None, None, 1, creationflags, env, cwd, startupinfo)
+ except pywintypes.error, e:
+ raise WindowsError(*e.args)
+
+ self._child_created = True
+ self._handle = hp
+ self.pid = pid
+ ht.Close()
+
+ if p2cread is not None:
+ p2cread.Close()
+ if c2pwrite is not None:
+ c2pwrite.Close()
+ if errwrite is not None:
+ errwrite.Close()
+
+
+ def poll(self, _deadstate=None):
+ if self.returncode is None:
+ if WaitForSingleObject(self._handle, 0) == WAIT_OBJECT_0:
+ self.returncode = GetExitCodeProcess(self._handle)
+ return self.returncode
+
+
+ def wait(self):
+ if self.returncode is None:
+ obj = WaitForSingleObject(self._handle, INFINITE)
+ self.returncode = GetExitCodeProcess(self._handle)
+ return self.returncode
+
+ def _readerthread(self, fh, buffer):
+ buffer.append(fh.read())
+
+ def _communicate(self, input):
+ stdout = None
+ stderr = None
+
+ if self.stdout:
+ stdout = []
+ stdout_thread = threading.Thread(target=self._readerthread, args=(self.stdout, stdout))
+ stdout_thread.setDaemon(True)
+ stdout_thread.start()
+ if self.stderr:
+ stderr = []
+ stderr_thread = threading.Thread(target=self._readerthread, args=(self.stderr, stderr))
+ stderr_thread.setDaemon(True)
+ stderr_thread.start()
+
+ if self.stdin:
+ if input is not None:
+ self.stdin.write(input)
+ self.stdin.close()
+
+ if self.stdout:
+ stdout_thread.join()
+ if self.stderr:
+ stderr_thread.join()
+
+ if stdout is not None:
+ stdout = stdout[0]
+ if stderr is not None:
+ stderr = stderr[0]
+
+ if self.universal_newlines and hasattr(file, 'newlines'):
+ if stdout:
+ stdout = self._translate_newlines(stdout)
+ if stderr:
+ stderr = self._translate_newlines(stderr)
+
+ self.wait()
+ return (stdout, stderr)
+
+ else:
+ def _get_handles(self, stdin, stdout, stderr):
+ p2cread, p2cwrite = None, None
+ c2pread, c2pwrite = None, None
+ errread, errwrite = None, None
+
+ if stdin is None:
+ pass
+ elif stdin == PIPE:
+ p2cread, p2cwrite = os.pipe()
+ elif isinstance(stdin, int):
+ p2cread = stdin
+ else:
+ p2cread = stdin.fileno()
+
+ if stdout is None:
+ pass
+ elif stdout == PIPE:
+ c2pread, c2pwrite = os.pipe()
+ elif isinstance(stdout, int):
+ c2pwrite = stdout
+ else:
+ c2pwrite = stdout.fileno()
+
+ if stderr is None:
+ pass
+ elif stderr == PIPE:
+ errread, errwrite = os.pipe()
+ elif stderr == STDOUT:
+ errwrite = c2pwrite
+ elif isinstance(stderr, int):
+ errwrite = stderr
+ else:
+ errwrite = stderr.fileno()
+
+ return (p2cread, p2cwrite, c2pread, c2pwrite, errread, errwrite)
+
+ def _set_cloexec_flag(self, fd):
+ try:
+ cloexec_flag = fcntl.FD_CLOEXEC
+ except AttributeError:
+ cloexec_flag = 1
+
+ old = fcntl.fcntl(fd, fcntl.F_GETFD)
+ fcntl.fcntl(fd, fcntl.F_SETFD, old | cloexec_flag)
+
+ def _close_fds(self, but):
+ for i in xrange(3, MAXFD):
+ if i == but:
+ continue
+ try:
+ os.close(i)
+ except:
+ pass
+
+ def _execute_child(self, args, executable, preexec_fn, close_fds,
+ cwd, env, universal_newlines, startupinfo, creationflags, shell,
+ p2cread, p2cwrite, c2pread, c2pwrite, errread, errwrite):
+
+ if isinstance(args, types.StringTypes):
+ args = [args]
+ else:
+ args = list(args)
+
+ if shell:
+ args = ["/bin/sh", "-c"] + args
+
+ if executable is None:
+ executable = args[0]
+
+ errpipe_read, errpipe_write = os.pipe()
+ self._set_cloexec_flag(errpipe_write)
+
+ gc_was_enabled = gc.isenabled()
+ gc.disable()
+ try:
+ self.pid = os.fork()
+ except:
+ if gc_was_enabled:
+ gc.enable()
+ raise
+ self._child_created = True
+ if self.pid == 0:
+ try:
+ if p2cwrite:
+ os.close(p2cwrite)
+ if c2pread:
+ os.close(c2pread)
+ if errread:
+ os.close(errread)
+ os.close(errpipe_read)
+
+ if p2cread:
+ os.dup2(p2cread, 0)
+ if c2pwrite:
+ os.dup2(c2pwrite, 1)
+ if errwrite:
+ os.dup2(errwrite, 2)
+
+ if p2cread and p2cread not in (0,):
+ os.close(p2cread)
+ if c2pwrite and c2pwrite not in (p2cread, 1):
+ os.close(c2pwrite)
+ if errwrite and errwrite not in (p2cread, c2pwrite, 2):
+ os.close(errwrite)
+
+ if close_fds:
+ self._close_fds(but=errpipe_write)
+
+ if cwd is not None:
+ os.chdir(cwd)
+
+ if preexec_fn:
+ apply(preexec_fn)
+
+ if env is None:
+ os.execvp(executable, args)
+ else:
+ os.execvpe(executable, args, env)
+
+ except:
+ exc_type, exc_value, tb = sys.exc_info()
+ exc_lines = traceback.format_exception(exc_type, exc_value, tb)
+ exc_value.child_traceback = ''.join(exc_lines)
+ os.write(errpipe_write, pickle.dumps(exc_value))
+
+ os._exit(255)
+
+ if gc_was_enabled:
+ gc.enable()
+ os.close(errpipe_write)
+ if p2cread and p2cwrite:
+ os.close(p2cread)
+ if c2pwrite and c2pread:
+ os.close(c2pwrite)
+ if errwrite and errread:
+ os.close(errwrite)
+
+ data = os.read(errpipe_read, 1048576)
+ os.close(errpipe_read)
+ if data != "":
+ os.waitpid(self.pid, 0)
+ child_exception = pickle.loads(data)
+ raise child_exception
+
+ def _handle_exitstatus(self, sts):
+ if os.WIFSIGNALED(sts):
+ self.returncode = -os.WTERMSIG(sts)
+ elif os.WIFEXITED(sts):
+ self.returncode = os.WEXITSTATUS(sts)
+ else:
+ raise RuntimeError("Unknown child exit status!")
+
+ def poll(self, _deadstate=None):
+ if self.returncode is None:
+ try:
+ pid, sts = os.waitpid(self.pid, os.WNOHANG)
+ if pid == self.pid:
+ self._handle_exitstatus(sts)
+ except os.error:
+ if _deadstate is not None:
+ self.returncode = _deadstate
+ return self.returncode
+
+ def wait(self):
+ if self.returncode is None:
+ pid, sts = os.waitpid(self.pid, 0)
+ self._handle_exitstatus(sts)
+ return self.returncode
+
+ def _communicate(self, input):
+ read_set = []
+ write_set = []
+ stdout = None
+ stderr = None
+
+ if self.stdin:
+ self.stdin.flush()
+ if input:
+ write_set.append(self.stdin)
+ else:
+ self.stdin.close()
+ if self.stdout:
+ read_set.append(self.stdout)
+ stdout = []
+ if self.stderr:
+ read_set.append(self.stderr)
+ stderr = []
+
+ input_offset = 0
+ while read_set or write_set:
+ rlist, wlist, xlist = select.select(read_set, write_set, [])
+
+ if self.stdin in wlist:
+ bytes_written = os.write(self.stdin.fileno(), buffer(input, input_offset, 512))
+ input_offset += bytes_written
+ if input_offset >= len(input):
+ self.stdin.close()
+ write_set.remove(self.stdin)
+
+ if self.stdout in rlist:
+ data = os.read(self.stdout.fileno(), 1024)
+ if data == "":
+ self.stdout.close()
+ read_set.remove(self.stdout)
+ stdout.append(data)
+
+ if self.stderr in rlist:
+ data = os.read(self.stderr.fileno(), 1024)
+ if data == "":
+ self.stderr.close()
+ read_set.remove(self.stderr)
+ stderr.append(data)
+
+ if stdout is not None:
+ stdout = ''.join(stdout)
+ if stderr is not None:
+ stderr = ''.join(stderr)
+
+ if self.universal_newlines and hasattr(file, 'newlines'):
+ if stdout:
+ stdout = self._translate_newlines(stdout)
+ if stderr:
+ stderr = self._translate_newlines(stderr)
+
+ self.wait()
+ return (stdout, stderr)
diff --git a/third_party/waf/wafadmin/py3kfixes.py b/third_party/waf/wafadmin/py3kfixes.py
new file mode 100644
index 0000000..1a64706
--- /dev/null
+++ b/third_party/waf/wafadmin/py3kfixes.py
@@ -0,0 +1,129 @@
+#!/usr/bin/env python
+# encoding: utf-8
+# Thomas Nagy, 2009 (ita)
+
+"""
+Fixes for py3k go here
+"""
+
+import os
+
+all_modifs = {}
+
+def modif(dir, name, fun):
+ if name == '*':
+ lst = []
+ for y in '. Tools 3rdparty'.split():
+ for x in os.listdir(os.path.join(dir, y)):
+ if x.endswith('.py'):
+ lst.append(y + os.sep + x)
+ #lst = [y + os.sep + x for x in os.listdir(os.path.join(dir, y)) for y in '. Tools 3rdparty'.split() if x.endswith('.py')]
+ for x in lst:
+ modif(dir, x, fun)
+ return
+
+ filename = os.path.join(dir, name)
+ f = open(filename, 'r')
+ txt = f.read()
+ f.close()
+
+ txt = fun(txt)
+
+ f = open(filename, 'w')
+ f.write(txt)
+ f.close()
+
+def subst(filename):
+ def do_subst(fun):
+ global all_modifs
+ try:
+ all_modifs[filename].append(fun) # the value is a list of substitution functions
+ except KeyError:
+ all_modifs[filename] = [fun]
+ return fun
+ return do_subst
+
+@subst('Constants.py')
+def r1(code):
+ code = code.replace("'iluvcuteoverload'", "b'iluvcuteoverload'")
+ code = code.replace("ABI=7", "ABI=37")
+ return code
+
+@subst('Tools/ccroot.py')
+def r2(code):
+ code = code.replace("p.stdin.write('\\n')", "p.stdin.write(b'\\n')")
+ code = code.replace('p.communicate()[0]', 'p.communicate()[0].decode("utf-8")')
+ return code
+
+@subst('Utils.py')
+def r3(code):
+ code = code.replace("m.update(str(lst))", "m.update(str(lst).encode())")
+ code = code.replace('p.communicate()[0]', 'p.communicate()[0].decode("utf-8")')
+ return code
+
+@subst('ansiterm.py')
+def r33(code):
+ code = code.replace('unicode', 'str')
+ return code
+
+@subst('Task.py')
+def r4(code):
+ code = code.replace("up(self.__class__.__name__)", "up(self.__class__.__name__.encode())")
+ code = code.replace("up(self.env.variant())", "up(self.env.variant().encode())")
+ code = code.replace("up(x.parent.abspath())", "up(x.parent.abspath().encode())")
+ code = code.replace("up(x.name)", "up(x.name.encode())")
+ code = code.replace('class TaskBase(object):\n\t__metaclass__=store_task_type', 'import binascii\n\nclass TaskBase(object, metaclass=store_task_type):')
+ code = code.replace('keys=self.cstr_groups.keys()', 'keys=list(self.cstr_groups.keys())')
+ code = code.replace("sig.encode('hex')", 'binascii.hexlify(sig)')
+ code = code.replace("os.path.join(Options.cache_global,ssig)", "os.path.join(Options.cache_global,ssig.decode())")
+ return code
+
+@subst('Build.py')
+def r5(code):
+ code = code.replace("cPickle.dump(data,file,-1)", "cPickle.dump(data,file)")
+ code = code.replace('for node in src_dir_node.childs.values():', 'for node in list(src_dir_node.childs.values()):')
+ return code
+
+@subst('*')
+def r6(code):
+ code = code.replace('xrange', 'range')
+ code = code.replace('iteritems', 'items')
+ code = code.replace('maxint', 'maxsize')
+ code = code.replace('iterkeys', 'keys')
+ code = code.replace('Error,e:', 'Error as e:')
+ code = code.replace('Exception,e:', 'Exception as e:')
+ return code
+
+@subst('TaskGen.py')
+def r7(code):
+ code = code.replace('class task_gen(object):\n\t__metaclass__=register_obj', 'class task_gen(object, metaclass=register_obj):')
+ return code
+
+@subst('Tools/python.py')
+def r8(code):
+ code = code.replace('proc.communicate()[0]', 'proc.communicate()[0].decode("utf-8")')
+ return code
+
+@subst('Tools/glib2.py')
+def r9(code):
+ code = code.replace('f.write(c)', 'f.write(c.encode("utf-8"))')
+ return code
+
+@subst('Tools/config_c.py')
+def r10(code):
+ code = code.replace("key=kw['success']", "key=kw['success']\n\t\t\t\ttry:\n\t\t\t\t\tkey=key.decode('utf-8')\n\t\t\t\texcept:\n\t\t\t\t\tpass")
+ code = code.replace('out=str(out)','out=out.decode("utf-8")')
+ code = code.replace('err=str(err)','err=err.decode("utf-8")')
+ return code
+
+@subst('Tools/d.py')
+def r11(code):
+ code = code.replace('ret.strip()', 'ret.strip().decode("utf-8")')
+ return code
+
+def fixdir(dir):
+ global all_modifs
+ for k in all_modifs:
+ for v in all_modifs[k]:
+ modif(os.path.join(dir, 'wafadmin'), k, v)
+ #print('substitutions finished')
diff --git a/tools/tdbbackup.c b/tools/tdbbackup.c
new file mode 100644
index 0000000..eb33e25
--- /dev/null
+++ b/tools/tdbbackup.c
@@ -0,0 +1,349 @@
+/*
+ Unix SMB/CIFS implementation.
+ low level tdb backup and restore utility
+ Copyright (C) Andrew Tridgell 2002
+
+ This program is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation; either version 3 of the License, or
+ (at your option) any later version.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program. If not, see <http://www.gnu.org/licenses/>.
+*/
+
+/*
+
+ This program is meant for backup/restore of tdb databases. Typical usage would be:
+ tdbbackup *.tdb
+ when Samba shuts down cleanly, which will make a backup of all the local databases
+ to *.bak files. Then on Samba startup you would use:
+ tdbbackup -v *.tdb
+ which checks the databases for corruption and, if corruption is
+ detected, restores them from the backup.
+
+ You may also like to do a backup on a regular basis while Samba is
+ running, perhaps using cron.
+
+ The reason this program is needed is to cope with power failures
+ while Samba is running. A power failure could lead to database
+ corruption and Samba will then not start correctly.
+
+ Note that many of the databases in Samba are transient and thus
+ don't need to be backed up, so you can optimise the above a little
+ by only running the backup on the critical databases.
+
+ */
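+
+/*
+  For example (illustrative only; paths and schedule depend on the
+  local installation), a nightly cron entry might look like:
+
+      0 3 * * *   tdbbackup /var/lib/samba/*.tdb
+ */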
+
+#include "replace.h"
+#include "system/locale.h"
+#include "system/time.h"
+#include "system/filesys.h"
+#include "system/wait.h"
+#include "tdb.h"
+
+#ifdef HAVE_GETOPT_H
+#include <getopt.h>
+#endif
+
+static int failed;
+
+static struct tdb_logging_context log_ctx;
+
+#ifdef PRINTF_ATTRIBUTE
+static void tdb_log(struct tdb_context *tdb, enum tdb_debug_level level, const char *format, ...) PRINTF_ATTRIBUTE(3,4);
+#endif
+static void tdb_log(struct tdb_context *tdb, enum tdb_debug_level level, const char *format, ...)
+{
+ va_list ap;
+
+ va_start(ap, format);
+ vfprintf(stdout, format, ap);
+ va_end(ap);
+ fflush(stdout);
+}
+
+static char *add_suffix(const char *name, const char *suffix)
+{
+ char *ret;
+ int len = strlen(name) + strlen(suffix) + 1;
+ ret = (char *)malloc(len);
+ if (!ret) {
+ fprintf(stderr,"Out of memory!\n");
+ exit(1);
+ }
+ snprintf(ret, len, "%s%s", name, suffix);
+ return ret;
+}
+
+static int copy_fn(TDB_CONTEXT *tdb, TDB_DATA key, TDB_DATA dbuf, void *state)
+{
+ TDB_CONTEXT *tdb_new = (TDB_CONTEXT *)state;
+
+ if (tdb_store(tdb_new, key, dbuf, TDB_INSERT) != 0) {
+ fprintf(stderr,"Failed to insert into %s\n", tdb_name(tdb_new));
+ failed = 1;
+ return 1;
+ }
+ return 0;
+}
+
+
+static int test_fn(TDB_CONTEXT *tdb, TDB_DATA key, TDB_DATA dbuf, void *state)
+{
+ return 0;
+}
+
+/*
+ carefully back up a tdb, validating the contents and
+ only doing the backup if it's OK;
+ this function is also used for restore
+*/
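+
+/*
+  Editorial summary of the procedure below:
+    1. stat() the old tdb to preserve its permissions
+    2. open it and create <new_name>.tmp with the same mode
+    3. start a transaction on the source and lock the whole copy
+    4. traverse, inserting every record into the copy
+    5. fdatasync/fsync the copy, reopen it read-only and re-count
+       the records to confirm the traversal totals match
+    6. rename the .tmp file onto new_name only if everything agreed
+ */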
+static int backup_tdb(const char *old_name, const char *new_name,
+ int hash_size, int nolock)
+{
+ TDB_CONTEXT *tdb;
+ TDB_CONTEXT *tdb_new;
+ char *tmp_name;
+ struct stat st;
+ int count1, count2;
+
+ tmp_name = add_suffix(new_name, ".tmp");
+
+ /* stat the old tdb to find its permissions */
+ if (stat(old_name, &st) != 0) {
+ perror(old_name);
+ free(tmp_name);
+ return 1;
+ }
+
+ /* open the old tdb */
+ tdb = tdb_open_ex(old_name, 0,
+ TDB_DEFAULT | (nolock ? TDB_NOLOCK : 0),
+ O_RDWR, 0, &log_ctx, NULL);
+ if (!tdb) {
+ printf("Failed to open %s\n", old_name);
+ free(tmp_name);
+ return 1;
+ }
+
+ /* create the new tdb */
+ unlink(tmp_name);
+ tdb_new = tdb_open_ex(tmp_name,
+ hash_size ? hash_size : tdb_hash_size(tdb),
+ TDB_DEFAULT,
+ O_RDWR|O_CREAT|O_EXCL, st.st_mode & 0777,
+ &log_ctx, NULL);
+ if (!tdb_new) {
+ perror(tmp_name);
+ free(tmp_name);
+ return 1;
+ }
+
+ if (tdb_transaction_start(tdb) != 0) {
+ printf("Failed to start transaction on old tdb\n");
+ tdb_close(tdb);
+ tdb_close(tdb_new);
+ unlink(tmp_name);
+ free(tmp_name);
+ return 1;
+ }
+
+ /* lock the backup tdb so that nobody else can change it */
+ if (tdb_lockall(tdb_new) != 0) {
+ printf("Failed to lock backup tdb\n");
+ tdb_close(tdb);
+ tdb_close(tdb_new);
+ unlink(tmp_name);
+ free(tmp_name);
+ return 1;
+ }
+
+ failed = 0;
+
+ /* traverse and copy */
+ count1 = tdb_traverse(tdb, copy_fn, (void *)tdb_new);
+ if (count1 < 0 || failed) {
+ fprintf(stderr,"failed to copy %s\n", old_name);
+ tdb_close(tdb);
+ tdb_close(tdb_new);
+ unlink(tmp_name);
+ free(tmp_name);
+ return 1;
+ }
+
+ /* close the old tdb */
+ tdb_close(tdb);
+
+ /* copy done, unlock the backup tdb */
+ tdb_unlockall(tdb_new);
+
+#ifdef HAVE_FDATASYNC
+ if (fdatasync(tdb_fd(tdb_new)) != 0) {
+#else
+ if (fsync(tdb_fd(tdb_new)) != 0) {
+#endif
+ /* not fatal */
+ fprintf(stderr, "failed to fsync backup file\n");
+ }
+
+ /* close the new tdb and re-open read-only */
+ tdb_close(tdb_new);
+ tdb_new = tdb_open_ex(tmp_name,
+ 0,
+ TDB_DEFAULT,
+ O_RDONLY, 0,
+ &log_ctx, NULL);
+ if (!tdb_new) {
+ fprintf(stderr,"failed to reopen %s\n", tmp_name);
+ unlink(tmp_name);
+ perror(tmp_name);
+ free(tmp_name);
+ return 1;
+ }
+
+ /* traverse the new tdb to confirm */
+ count2 = tdb_traverse(tdb_new, test_fn, NULL);
+ if (count2 != count1) {
+ fprintf(stderr,"failed to copy %s\n", old_name);
+ tdb_close(tdb_new);
+ unlink(tmp_name);
+ free(tmp_name);
+ return 1;
+ }
+
+ /* close the new tdb and rename it to .bak */
+ tdb_close(tdb_new);
+ if (rename(tmp_name, new_name) != 0) {
+ perror(new_name);
+ free(tmp_name);
+ return 1;
+ }
+
+ free(tmp_name);
+
+ return 0;
+}
+
+/*
+ verify a tdb and, if it is corrupt, restore it from *.bak
+*/
+static int verify_tdb(const char *fname, const char *bak_name)
+{
+ TDB_CONTEXT *tdb;
+ int count = -1;
+
+ /* open the tdb */
+ tdb = tdb_open_ex(fname, 0, 0,
+ O_RDONLY, 0, &log_ctx, NULL);
+
+ /* traverse the tdb, then close it */
+ if (tdb) {
+ count = tdb_traverse(tdb, test_fn, NULL);
+ tdb_close(tdb);
+ }
+
+ /* count is < 0 means an error */
+ if (count < 0) {
+ printf("restoring %s\n", fname);
+ return backup_tdb(bak_name, fname, 0, 0);
+ }
+
+ printf("%s : %d records\n", fname, count);
+
+ return 0;
+}
+
+/*
+ see if one file is newer than another
+*/
+static int file_newer(const char *fname1, const char *fname2)
+{
+ struct stat st1, st2;
+ if (stat(fname1, &st1) != 0) {
+ return 0;
+ }
+ if (stat(fname2, &st2) != 0) {
+ return 1;
+ }
+ return (st1.st_mtime > st2.st_mtime);
+}
+
+static void usage(void)
+{
+ printf("Usage: tdbbackup [options] <fname...>\n\n");
+ printf(" -h this help message\n");
+ printf(" -s suffix set the backup suffix\n");
+ printf(" -v verify mode (restore if corrupt)\n");
+ printf(" -n hashsize set the new hash size for the backup\n");
+ printf(" -l open without locking to back up mutex dbs\n");
+}
+
+ int main(int argc, char *argv[])
+{
+ int i;
+ int ret = 0;
+ int c;
+ int verify = 0;
+ int hashsize = 0;
+ int nolock = 0;
+ const char *suffix = ".bak";
+
+ log_ctx.log_fn = tdb_log;
+
+ while ((c = getopt(argc, argv, "vhs:n:l")) != -1) {
+ switch (c) {
+ case 'h':
+ usage();
+ exit(0);
+ case 'v':
+ verify = 1;
+ break;
+ case 's':
+ suffix = optarg;
+ break;
+ case 'n':
+ hashsize = atoi(optarg);
+ break;
+ case 'l':
+ nolock = 1;
+ break;
+ }
+ }
+
+ argc -= optind;
+ argv += optind;
+
+ if (argc < 1) {
+ usage();
+ exit(1);
+ }
+
+ for (i=0; i<argc; i++) {
+ const char *fname = argv[i];
+ char *bak_name;
+
+ bak_name = add_suffix(fname, suffix);
+
+ if (verify) {
+ if (verify_tdb(fname, bak_name) != 0) {
+ ret = 1;
+ }
+ } else {
+ if (file_newer(fname, bak_name) &&
+ backup_tdb(fname, bak_name, hashsize,
+ nolock) != 0) {
+ ret = 1;
+ }
+ }
+
+ free(bak_name);
+ }
+
+ return ret;
+}
diff --git a/tools/tdbdump.c b/tools/tdbdump.c
new file mode 100644
index 0000000..9a0a7fe
--- /dev/null
+++ b/tools/tdbdump.c
@@ -0,0 +1,176 @@
+/*
+ Unix SMB/CIFS implementation.
+ simple tdb dump util
+ Copyright (C) Andrew Tridgell 2001
+
+ This program is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation; either version 3 of the License, or
+ (at your option) any later version.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program. If not, see <http://www.gnu.org/licenses/>.
+*/
+
+#include "replace.h"
+#include "system/locale.h"
+#include "system/time.h"
+#include "system/filesys.h"
+#include "system/wait.h"
+#include "tdb.h"
+
+static void print_data(TDB_DATA d)
+{
+ unsigned char *p = (unsigned char *)d.dptr;
+ int len = d.dsize;
+ while (len--) {
+ if (isprint(*p) && !strchr("\"\\", *p)) {
+ fputc(*p, stdout);
+ } else {
+ printf("\\%02X", *p);
+ }
+ p++;
+ }
+}
+
+static int traverse_fn(TDB_CONTEXT *tdb, TDB_DATA key, TDB_DATA dbuf, void *state)
+{
+ printf("{\n");
+ printf("key(%d) = \"", (int)key.dsize);
+ print_data(key);
+ printf("\"\n");
+ printf("data(%d) = \"", (int)dbuf.dsize);
+ print_data(dbuf);
+ printf("\"\n");
+ printf("}\n");
+ return 0;
+}
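+
+/*
+  Editorial note: the record format printed above is what tdbrestore
+  expects on stdin, e.g. (illustrative):
+
+      {
+      key(3) = "foo"
+      data(5) = "hello"
+      }
+
+  Non-printable bytes, '"' and '\' are escaped as \XX hex pairs by
+  print_data().
+ */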
+
+static void log_stderr(struct tdb_context *tdb, enum tdb_debug_level level,
+ const char *fmt, ...)
+{
+ va_list ap;
+ const char *name = tdb_name(tdb);
+ const char *prefix = "";
+
+ if (!name)
+ name = "unnamed";
+
+ switch (level) {
+ case TDB_DEBUG_ERROR:
+ prefix = "ERROR: ";
+ break;
+ case TDB_DEBUG_WARNING:
+ prefix = "WARNING: ";
+ break;
+ case TDB_DEBUG_TRACE:
+ return;
+
+ default:
+ case TDB_DEBUG_FATAL:
+ prefix = "FATAL: ";
+ break;
+ }
+
+ va_start(ap, fmt);
+ fprintf(stderr, "tdb(%s): %s", name, prefix);
+ vfprintf(stderr, fmt, ap);
+ va_end(ap);
+}
+
+static void emergency_walk(TDB_DATA key, TDB_DATA dbuf, void *keyname)
+{
+ if (keyname) {
+ if (key.dsize != strlen(keyname))
+ return;
+ if (memcmp(key.dptr, keyname, key.dsize) != 0)
+ return;
+ }
+ traverse_fn(NULL, key, dbuf, NULL);
+}
+
+static int dump_tdb(const char *fname, const char *keyname, bool emergency)
+{
+ TDB_CONTEXT *tdb;
+ TDB_DATA key, value;
+ struct tdb_logging_context logfn = { log_stderr };
+ int tdb_flags = TDB_DEFAULT;
+
+ /*
+	 * Note that O_RDONLY implies TDB_NOLOCK, but we want to make it
+ * explicit as it's important when working on databases which were
+ * created with mutex locking.
+ */
+ tdb_flags |= TDB_NOLOCK;
+
+ tdb = tdb_open_ex(fname, 0, tdb_flags, O_RDONLY, 0, &logfn, NULL);
+ if (!tdb) {
+ printf("Failed to open %s\n", fname);
+ return 1;
+ }
+
+ if (emergency) {
+		return tdb_rescue(tdb, emergency_walk, discard_const(keyname)) == 0 ? 0 : 1;
+ }
+ if (!keyname) {
+ return tdb_traverse(tdb, traverse_fn, NULL) == -1 ? 1 : 0;
+ } else {
+ key.dptr = discard_const_p(uint8_t, keyname);
+ key.dsize = strlen(keyname);
+ value = tdb_fetch(tdb, key);
+ if (!value.dptr) {
+ return 1;
+ } else {
+ print_data(value);
+ free(value.dptr);
+ }
+ }
+
+ return 0;
+}
+
+static void usage(void)
+{
+	printf("Usage: tdbdump [options] <filename>\n\n");
+	printf(" -h this help message\n");
+	printf(" -k keyname dumps value of keyname\n");
+	printf(" -e emergency dump, for corrupt databases\n");
+}
+
+ int main(int argc, char *argv[])
+{
+ char *fname, *keyname=NULL;
+ bool emergency = false;
+ int c;
+
+ if (argc < 2) {
+ printf("Usage: tdbdump <fname>\n");
+ exit(1);
+ }
+
+ while ((c = getopt( argc, argv, "hk:e")) != -1) {
+ switch (c) {
+ case 'h':
+ usage();
+ exit( 0);
+ case 'k':
+ keyname = optarg;
+ break;
+ case 'e':
+ emergency = true;
+ break;
+ default:
+ usage();
+ exit( 1);
+ }
+ }
+
+ fname = argv[optind];
+
+ return dump_tdb(fname, keyname, emergency);
+}
diff --git a/tools/tdbrestore.c b/tools/tdbrestore.c
new file mode 100644
index 0000000..f65b36f
--- /dev/null
+++ b/tools/tdbrestore.c
@@ -0,0 +1,222 @@
+/*
+ tdbrestore -- construct a tdb from tdbdump output.
+ Copyright (C) Volker Lendecke 2010
+ Copyright (C) Simon McVittie 2005
+
+ This program is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation; either version 3 of the License, or
+ (at your option) any later version.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program. If not, see <http://www.gnu.org/licenses/>.
+*/
+
+#include <assert.h>
+#include "replace.h"
+#include "system/locale.h"
+#include "system/time.h"
+#include "system/filesys.h"
+#include "system/wait.h"
+#include "tdb.h"
+
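+/*
+  Editorial note: read_rec() below consumes one tdbdump-format record:
+
+      {
+      key(<len>) = "<escaped bytes>"
+      data(<len>) = "<escaped bytes>"
+      }
+
+  read_linehead() scans forward to '(' and parses the length up to the
+  opening quote, and read_data() decodes the \XX hex escapes emitted
+  by tdbdump.
+ */
+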
+static int read_linehead(FILE *f)
+{
+ int i, c;
+ int num_bytes;
+ char prefix[128];
+
+ while (1) {
+ c = getc(f);
+ if (c == EOF) {
+ return -1;
+ }
+ if (c == '(') {
+ break;
+ }
+ }
+ for (i=0; i<sizeof(prefix); i++) {
+ c = getc(f);
+ if (c == EOF) {
+ return -1;
+ }
+ prefix[i] = c;
+ if (c == '"') {
+ break;
+ }
+ }
+ if (i == sizeof(prefix)) {
+ return -1;
+ }
+ prefix[i] = '\0';
+
+ if (sscanf(prefix, "%d) = ", &num_bytes) != 1) {
+ return -1;
+ }
+ return num_bytes;
+}
+
+static int read_hex(void)
+{
+ int c;
+ c = getchar();
+ if (c == EOF) {
+ fprintf(stderr, "Unexpected EOF in data\n");
+ return -1;
+ } else if (c == '"') {
+ fprintf(stderr, "Unexpected \\\" sequence\n");
+ return -1;
+ } else if ('0' <= c && c <= '9') {
+ return c - '0';
+ } else if ('A' <= c && c <= 'F') {
+ return c - 'A' + 10;
+ } else if ('a' <= c && c <= 'f') {
+ return c - 'a' + 10;
+ } else {
+ fprintf(stderr, "Invalid hex: %c\n", c);
+ return -1;
+ }
+}
+
+static int read_data(FILE *f, TDB_DATA *d, size_t size)
+{
+ int c, low, high;
+ int i;
+
+ d->dptr = (unsigned char *)malloc(size);
+ if (d->dptr == NULL) {
+ return -1;
+ }
+ d->dsize = size;
+
+ for (i=0; i<size; i++) {
+ c = getc(f);
+ if (c == EOF) {
+ fprintf(stderr, "Unexpected EOF in data\n");
+ return 1;
+ } else if (c == '"') {
+ return 0;
+ } else if (c == '\\') {
+ high = read_hex();
+ if (high < 0) {
+ return -1;
+ }
+ high = high << 4;
+ assert(high == (high & 0xf0));
+ low = read_hex();
+ if (low < 0) {
+ return -1;
+ }
+ assert(low == (low & 0x0f));
+ d->dptr[i] = (low|high);
+ } else {
+ d->dptr[i] = c;
+ }
+ }
+ return 0;
+}
+
+static int swallow(FILE *f, const char *s, int *eof)
+{
+ char line[128];
+
+ if (fgets(line, sizeof(line), f) == NULL) {
+ if (eof != NULL) {
+ *eof = 1;
+ }
+ return -1;
+ }
+ if (strcmp(line, s) != 0) {
+ return -1;
+ }
+ return 0;
+}
+
+static int read_rec(FILE *f, TDB_CONTEXT *tdb, int *eof)
+{
+ int length;
+ TDB_DATA key, data;
+ int ret = -1;
+
+ key.dptr = NULL;
+ data.dptr = NULL;
+
+ if (swallow(f, "{\n", eof) == -1) {
+ goto fail;
+ }
+ length = read_linehead(f);
+ if (length == -1) {
+ goto fail;
+ }
+ if (read_data(f, &key, length) == -1) {
+ goto fail;
+ }
+ if (swallow(f, "\"\n", NULL) == -1) {
+ goto fail;
+ }
+ length = read_linehead(f);
+ if (length == -1) {
+ goto fail;
+ }
+ if (read_data(f, &data, length) == -1) {
+ goto fail;
+ }
+ if ((swallow(f, "\"\n", NULL) == -1)
+ || (swallow(f, "}\n", NULL) == -1)) {
+ goto fail;
+ }
+ if (tdb_store(tdb, key, data, TDB_INSERT) != 0) {
+ fprintf(stderr, "TDB error: %s\n", tdb_errorstr(tdb));
+ goto fail;
+ }
+
+ ret = 0;
+fail:
+ free(key.dptr);
+ free(data.dptr);
+ return ret;
+}
+
+static int restore_tdb(const char *fname)
+{
+ TDB_CONTEXT *tdb;
+
+ tdb = tdb_open(fname, 0, 0, O_RDWR|O_CREAT|O_EXCL, 0666);
+ if (!tdb) {
+ perror("tdb_open");
+ fprintf(stderr, "Failed to open %s\n", fname);
+ return 1;
+ }
+
+ while (1) {
+ int eof = 0;
+ if (read_rec(stdin, tdb, &eof) == -1) {
+ if (eof) {
+ break;
+ }
+ return 1;
+ }
+ }
+ if (tdb_close(tdb)) {
+ fprintf(stderr, "Error closing tdb\n");
+ return 1;
+ }
+ return 0;
+}
+
+int main(int argc, char *argv[])
+{
+ char *fname;
+
+ if (argc < 2) {
+ printf("Usage: %s dbname < tdbdump_output\n", argv[0]);
+ exit(1);
+ }
+
+ fname = argv[1];
+
+ return restore_tdb(fname);
+}
diff --git a/tools/tdbtest.c b/tools/tdbtest.c
new file mode 100644
index 0000000..0be35dc
--- /dev/null
+++ b/tools/tdbtest.c
@@ -0,0 +1,290 @@
+/* a test program for tdb - the trivial database */
+
+#include "replace.h"
+#include "tdb.h"
+#include "system/filesys.h"
+#include "system/time.h"
+
+#include <gdbm.h>
+
+
+#define DELETE_PROB 7
+#define STORE_PROB 5
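+
+/*
+  Editorial note: each probability P above means "roughly 1 in P of
+  the random operations": addrec_db() deletes when
+  rand() % DELETE_PROB == 0, stores when rand() % STORE_PROB == 0,
+  and otherwise just fetches the key.
+ */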
+
+static struct tdb_context *db;
+static GDBM_FILE gdbm;
+
+struct timeval tp1,tp2;
+
+static void _start_timer(void)
+{
+ gettimeofday(&tp1,NULL);
+}
+
+static double _end_timer(void)
+{
+ gettimeofday(&tp2,NULL);
+ return((tp2.tv_sec - tp1.tv_sec) +
+ (tp2.tv_usec - tp1.tv_usec)*1.0e-6);
+}
+
+static void fatal(const char *why)
+{
+ perror(why);
+ exit(1);
+}
+
+#ifdef PRINTF_ATTRIBUTE
+static void tdb_log(struct tdb_context *tdb, int level, const char *format, ...) PRINTF_ATTRIBUTE(3,4);
+#endif
+static void tdb_log(struct tdb_context *tdb, int level, const char *format, ...)
+{
+ va_list ap;
+
+ va_start(ap, format);
+ vfprintf(stdout, format, ap);
+ va_end(ap);
+ fflush(stdout);
+}
+
+static void compare_db(void)
+{
+ TDB_DATA d, key, nextkey;
+ datum gd, gkey, gnextkey;
+
+ key = tdb_firstkey(db);
+ while (key.dptr) {
+ d = tdb_fetch(db, key);
+ gkey.dptr = key.dptr;
+ gkey.dsize = key.dsize;
+
+ gd = gdbm_fetch(gdbm, gkey);
+
+ if (!gd.dptr) fatal("key not in gdbm");
+ if (gd.dsize != d.dsize) fatal("data sizes differ");
+ if (memcmp(gd.dptr, d.dptr, d.dsize)) {
+ fatal("data differs");
+ }
+
+ nextkey = tdb_nextkey(db, key);
+ free(key.dptr);
+ free(d.dptr);
+ free(gd.dptr);
+ key = nextkey;
+ }
+
+ gkey = gdbm_firstkey(gdbm);
+ while (gkey.dptr) {
+ gd = gdbm_fetch(gdbm, gkey);
+ key.dptr = gkey.dptr;
+ key.dsize = gkey.dsize;
+
+ d = tdb_fetch(db, key);
+
+ if (!d.dptr) fatal("key not in db");
+ if (d.dsize != gd.dsize) fatal("data sizes differ");
+ if (memcmp(d.dptr, gd.dptr, gd.dsize)) {
+ fatal("data differs");
+ }
+
+ gnextkey = gdbm_nextkey(gdbm, gkey);
+ free(gkey.dptr);
+ free(gd.dptr);
+ free(d.dptr);
+ gkey = gnextkey;
+ }
+}
+
+static char *randbuf(int len)
+{
+ char *buf;
+ int i;
+ buf = (char *)malloc(len+1);
+
+ for (i=0;i<len;i++) {
+ buf[i] = 'a' + (rand() % 26);
+ }
+ buf[i] = 0;
+ return buf;
+}
+
+static void addrec_db(void)
+{
+ int klen, dlen;
+ char *k, *d;
+ TDB_DATA key, data;
+
+ klen = 1 + (rand() % 4);
+ dlen = 1 + (rand() % 100);
+
+ k = randbuf(klen);
+ d = randbuf(dlen);
+
+ key.dptr = k;
+ key.dsize = klen+1;
+
+ data.dptr = d;
+ data.dsize = dlen+1;
+
+ if (rand() % DELETE_PROB == 0) {
+ tdb_delete(db, key);
+ } else if (rand() % STORE_PROB == 0) {
+ if (tdb_store(db, key, data, TDB_REPLACE) != 0) {
+ fatal("tdb_store failed");
+ }
+ } else {
+ data = tdb_fetch(db, key);
+ if (data.dptr) free(data.dptr);
+ }
+
+ free(k);
+ free(d);
+}
+
+static void addrec_gdbm(void)
+{
+ int klen, dlen;
+ char *k, *d;
+ datum key, data;
+
+ klen = 1 + (rand() % 4);
+ dlen = 1 + (rand() % 100);
+
+ k = randbuf(klen);
+ d = randbuf(dlen);
+
+ key.dptr = k;
+ key.dsize = klen+1;
+
+ data.dptr = d;
+ data.dsize = dlen+1;
+
+ if (rand() % DELETE_PROB == 0) {
+ gdbm_delete(gdbm, key);
+ } else if (rand() % STORE_PROB == 0) {
+ if (gdbm_store(gdbm, key, data, GDBM_REPLACE) != 0) {
+ fatal("gdbm_store failed");
+ }
+ } else {
+ data = gdbm_fetch(gdbm, key);
+ if (data.dptr) free(data.dptr);
+ }
+
+ free(k);
+ free(d);
+}
+
+static int traverse_fn(struct tdb_context *tdb, TDB_DATA key, TDB_DATA dbuf, void *state)
+{
+#if 0
+ printf("[%s] [%s]\n", key.dptr, dbuf.dptr);
+#endif
+ tdb_delete(tdb, key);
+ return 0;
+}
+
+static void merge_test(void)
+{
+ int i;
+ char keys[5][2];
+ char tdata[] = "test";
+ TDB_DATA key, data;
+
+ for (i = 0; i < 5; i++) {
+ snprintf(keys[i],2, "%d", i);
+ key.dptr = keys[i];
+ key.dsize = 2;
+
+ data.dptr = tdata;
+ data.dsize = 4;
+
+ if (tdb_store(db, key, data, TDB_REPLACE) != 0) {
+ fatal("tdb_store failed");
+ }
+ }
+
+ key.dptr = keys[0];
+ tdb_delete(db, key);
+ key.dptr = keys[4];
+ tdb_delete(db, key);
+ key.dptr = keys[2];
+ tdb_delete(db, key);
+ key.dptr = keys[1];
+ tdb_delete(db, key);
+ key.dptr = keys[3];
+ tdb_delete(db, key);
+}
+
+static char *test_path(const char *filename)
+{
+ const char *prefix = getenv("TEST_DATA_PREFIX");
+
+ if (prefix) {
+ char *path = NULL;
+ int ret;
+
+ ret = asprintf(&path, "%s/%s", prefix, filename);
+ if (ret == -1) {
+ return NULL;
+ }
+ return path;
+ }
+
+ return strdup(filename);
+}
+
+ int main(int argc, const char *argv[])
+{
+ int i, seed=0;
+ int loops = 10000;
+ int num_entries;
+	char *test_gdbm;
+ char *test_tdb;
+
+	test_gdbm = test_path("test.gdbm");
+ test_tdb = test_path("test.tdb");
+
+	unlink(test_gdbm);
+
+ db = tdb_open(test_tdb, 0, TDB_CLEAR_IF_FIRST,
+ O_RDWR | O_CREAT | O_TRUNC, 0600);
+ gdbm = gdbm_open(test_gdbm, 512, GDBM_WRITER|GDBM_NEWDB|GDBM_FAST,
+ 0600, NULL);
+
+ if (!db || !gdbm) {
+ fatal("db open failed");
+ }
+
+#if 1
+ srand(seed);
+ _start_timer();
+ for (i=0;i<loops;i++) addrec_gdbm();
+ printf("gdbm got %.2f ops/sec\n", i/_end_timer());
+#endif
+
+ merge_test();
+
+ srand(seed);
+ _start_timer();
+ for (i=0;i<loops;i++) addrec_db();
+ printf("tdb got %.2f ops/sec\n", i/_end_timer());
+
+ if (tdb_validate_freelist(db, &num_entries) == -1) {
+ printf("tdb freelist is corrupt\n");
+ } else {
+ printf("tdb freelist is good (%d entries)\n", num_entries);
+ }
+
+ compare_db();
+
+ printf("traversed %d records\n", tdb_traverse(db, traverse_fn, NULL));
+ printf("traversed %d records\n", tdb_traverse(db, traverse_fn, NULL));
+
+ tdb_close(db);
+ gdbm_close(gdbm);
+
+	free(test_gdbm);
+ free(test_tdb);
+
+ return 0;
+}
diff --git a/tools/tdbtool.c b/tools/tdbtool.c
new file mode 100644
index 0000000..beb3af1
--- /dev/null
+++ b/tools/tdbtool.c
@@ -0,0 +1,858 @@
+/*
+ Unix SMB/CIFS implementation.
+ Samba database functions
+ Copyright (C) Andrew Tridgell 1999-2000
+ Copyright (C) Paul `Rusty' Russell 2000
+ Copyright (C) Jeremy Allison 2000
+ Copyright (C) Andrew Esh 2001
+
+ This program is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation; either version 3 of the License, or
+ (at your option) any later version.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program. If not, see <http://www.gnu.org/licenses/>.
+*/
+
+#include "replace.h"
+#include "system/locale.h"
+#include "system/time.h"
+#include "system/filesys.h"
+#include "system/wait.h"
+#include "tdb.h"
+
+static int do_command(void);
+const char *cmdname;
+char *arg1, *arg2;
+size_t arg1len, arg2len;
+int bIterate = 0;
+char *line;
+TDB_DATA iterate_kbuf;
+char cmdline[1024];
+static int disable_mmap;
+static int disable_lock;
+
+enum commands {
+ CMD_CREATE_TDB,
+ CMD_OPEN_TDB,
+ CMD_TRANSACTION_START,
+ CMD_TRANSACTION_COMMIT,
+ CMD_TRANSACTION_CANCEL,
+ CMD_ERASE,
+ CMD_DUMP,
+ CMD_INSERT,
+ CMD_MOVE,
+ CMD_STORE,
+ CMD_SHOW,
+ CMD_KEYS,
+ CMD_HEXKEYS,
+ CMD_DELETE,
+ CMD_LIST_HASH_FREE,
+ CMD_LIST_FREE,
+ CMD_FREELIST_SIZE,
+ CMD_INFO,
+ CMD_MMAP,
+ CMD_SPEED,
+ CMD_FIRST,
+ CMD_NEXT,
+ CMD_SYSTEM,
+ CMD_CHECK,
+ CMD_REPACK,
+ CMD_QUIT,
+ CMD_HELP
+};
+
+typedef struct {
+ const char *name;
+ enum commands cmd;
+} COMMAND_TABLE;
+
+COMMAND_TABLE cmd_table[] = {
+ {"create", CMD_CREATE_TDB},
+ {"open", CMD_OPEN_TDB},
+ {"transaction_start", CMD_TRANSACTION_START},
+ {"transaction_commit", CMD_TRANSACTION_COMMIT},
+ {"transaction_cancel", CMD_TRANSACTION_CANCEL},
+ {"erase", CMD_ERASE},
+ {"dump", CMD_DUMP},
+ {"insert", CMD_INSERT},
+ {"move", CMD_MOVE},
+ {"store", CMD_STORE},
+ {"show", CMD_SHOW},
+ {"keys", CMD_KEYS},
+ {"hexkeys", CMD_HEXKEYS},
+ {"delete", CMD_DELETE},
+ {"list", CMD_LIST_HASH_FREE},
+ {"free", CMD_LIST_FREE},
+ {"freelist_size", CMD_FREELIST_SIZE},
+ {"info", CMD_INFO},
+ {"speed", CMD_SPEED},
+ {"mmap", CMD_MMAP},
+ {"first", CMD_FIRST},
+ {"1", CMD_FIRST},
+ {"next", CMD_NEXT},
+ {"n", CMD_NEXT},
+ {"check", CMD_CHECK},
+ {"quit", CMD_QUIT},
+ {"q", CMD_QUIT},
+ {"!", CMD_SYSTEM},
+ {"repack", CMD_REPACK},
+ {NULL, CMD_HELP}
+};
+
+struct timeval tp1,tp2;
+
+static void _start_timer(void)
+{
+ gettimeofday(&tp1,NULL);
+}
+
+static double _end_timer(void)
+{
+ gettimeofday(&tp2,NULL);
+ return((tp2.tv_sec - tp1.tv_sec) +
+ (tp2.tv_usec - tp1.tv_usec)*1.0e-6);
+}
+
+#ifdef PRINTF_ATTRIBUTE
+static void tdb_log_open(struct tdb_context *tdb, enum tdb_debug_level level,
+ const char *format, ...) PRINTF_ATTRIBUTE(3,4);
+#endif
+static void tdb_log_open(struct tdb_context *tdb, enum tdb_debug_level level,
+ const char *format, ...)
+{
+ const char *mutex_msg =
+ "Can use mutexes only with MUTEX_LOCKING or NOLOCK\n";
+ char *p;
+ va_list ap;
+
+ p = strstr(format, mutex_msg);
+ if (p != NULL) {
+ /*
+		 * Yes, this is a hack: we don't want to see this
+		 * message on first open, but we do want to see
+		 * everything else.
+ */
+ return;
+ }
+
+ va_start(ap, format);
+ vfprintf(stderr, format, ap);
+ va_end(ap);
+}
+
+#ifdef PRINTF_ATTRIBUTE
+static void tdb_log(struct tdb_context *tdb, enum tdb_debug_level level, const char *format, ...) PRINTF_ATTRIBUTE(3,4);
+#endif
+static void tdb_log(struct tdb_context *tdb, enum tdb_debug_level level, const char *format, ...)
+{
+ va_list ap;
+
+ va_start(ap, format);
+ vfprintf(stderr, format, ap);
+ va_end(ap);
+}
+
+/* a tdb tool for manipulating a tdb database */
+
+static TDB_CONTEXT *tdb;
+
+static int print_rec(TDB_CONTEXT *the_tdb, TDB_DATA key, TDB_DATA dbuf, void *state);
+static int print_key(TDB_CONTEXT *the_tdb, TDB_DATA key, TDB_DATA dbuf, void *state);
+static int print_hexkey(TDB_CONTEXT *the_tdb, TDB_DATA key, TDB_DATA dbuf, void *state);
+
+static void print_asc(const char *buf,int len)
+{
+ int i;
+
+	/* We're probably printing ASCII strings so don't try to display
+	   the trailing NUL terminator. */
+
+ if (buf[len - 1] == 0)
+ len--;
+
+ for (i=0;i<len;i++)
+ printf("%c",isprint(buf[i])?buf[i]:'.');
+}
+
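+/*
+  Editorial note: print_data() produces a classic hex dump, 16 bytes
+  per line with the ASCII rendering on the right, e.g. (illustrative):
+
+      [000] 68 65 6C 6C 6F 20 77 6F  72 6C 64 00              hello wo rld.
+ */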
+static void print_data(const char *buf,int len)
+{
+ int i=0;
+ if (len<=0) return;
+ printf("[%03X] ",i);
+ for (i=0;i<len;) {
+ printf("%02X ",(int)((unsigned char)buf[i]));
+ i++;
+ if (i%8 == 0) printf(" ");
+ if (i%16 == 0) {
+ print_asc(&buf[i-16],8); printf(" ");
+ print_asc(&buf[i-8],8); printf("\n");
+ if (i<len) printf("[%03X] ",i);
+ }
+ }
+ if (i%16) {
+ int n;
+
+ n = 16 - (i%16);
+ printf(" ");
+ if (n>8) printf(" ");
+ while (n--) printf(" ");
+
+ n = i%16;
+ if (n > 8) n = 8;
+ print_asc(&buf[i-(i%16)],n); printf(" ");
+ n = (i%16) - n;
+ if (n>0) print_asc(&buf[i-n],n);
+ printf("\n");
+ }
+}
+
+static void help(void)
+{
+ printf("\n"
+"tdbtool: \n"
+" create dbname : create a database\n"
+" open dbname : open an existing database\n"
+" transaction_start : start a transaction\n"
+" transaction_commit : commit a transaction\n"
+" transaction_cancel : cancel a transaction\n"
+" erase : erase the database\n"
+" dump : dump the database as strings\n"
+" keys : dump the database keys as strings\n"
+" hexkeys : dump the database keys as hex values\n"
+" info : print summary info about the database\n"
+" insert key data : insert a record\n"
+" move key file : move a record to a destination tdb\n"
+" store key data : store a record (replace)\n"
+" show key : show a record by key\n"
+" delete key : delete a record by key\n"
+" list : print the database hash table and freelist\n"
+" free : print the database freelist\n"
+" freelist_size : print the number of records in the freelist\n"
+" check : check the integrity of an opened database\n"
+" repack : repack the database\n"
+" speed : perform speed tests on the database\n"
+" ! command : execute system command\n"
+" 1 | first : print the first record\n"
+" n | next : print the next record\n"
+" q | quit : terminate\n"
+" \\n : repeat 'next' command\n"
+"\n");
+}
+
+static void terror(const char *why)
+{
+ printf("%s\n", why);
+}
+
+static void create_tdb(const char *tdbname)
+{
+ struct tdb_logging_context log_ctx = { NULL, NULL};
+ log_ctx.log_fn = tdb_log;
+
+ if (tdb) tdb_close(tdb);
+ tdb = tdb_open_ex(tdbname, 0,
+ TDB_CLEAR_IF_FIRST |
+ (disable_mmap?TDB_NOMMAP:0) |
+ (disable_lock?TDB_NOLOCK:0),
+ O_RDWR | O_CREAT | O_TRUNC, 0600, &log_ctx, NULL);
+ if (!tdb) {
+ printf("Could not create %s: %s\n", tdbname, strerror(errno));
+ }
+}
+
+static void open_tdb(const char *tdbname)
+{
+ struct tdb_logging_context log_ctx = { NULL, NULL };
+ log_ctx.log_fn = tdb_log_open;
+
+ if (tdb) tdb_close(tdb);
+ tdb = tdb_open_ex(tdbname, 0,
+ (disable_mmap?TDB_NOMMAP:0) |
+ (disable_lock?TDB_NOLOCK:0),
+ O_RDWR, 0600,
+ &log_ctx, NULL);
+
+ log_ctx.log_fn = tdb_log;
+ if (tdb != NULL) {
+ tdb_set_logging_function(tdb, &log_ctx);
+ }
+
+ if ((tdb == NULL) && (errno == EINVAL)) {
+ /*
+ * Retry NOLOCK and readonly. There we want to see all
+ * error messages.
+ */
+ tdb = tdb_open_ex(tdbname, 0,
+ (disable_mmap?TDB_NOMMAP:0) |TDB_NOLOCK,
+ O_RDONLY, 0600,
+ &log_ctx, NULL);
+ }
+
+ if (!tdb) {
+ printf("Could not open %s: %s\n", tdbname, strerror(errno));
+ }
+}
+
+static void insert_tdb(char *keyname, size_t keylen, char* data, size_t datalen)
+{
+ TDB_DATA key, dbuf;
+
+ if ((keyname == NULL) || (keylen == 0)) {
+ terror("need key");
+ return;
+ }
+
+ key.dptr = (unsigned char *)keyname;
+ key.dsize = keylen;
+ dbuf.dptr = (unsigned char *)data;
+ dbuf.dsize = datalen;
+
+ if (tdb_store(tdb, key, dbuf, TDB_INSERT) != 0) {
+ terror("insert failed");
+ }
+}
+
+static void store_tdb(char *keyname, size_t keylen, char* data, size_t datalen)
+{
+ TDB_DATA key, dbuf;
+
+ if ((keyname == NULL) || (keylen == 0)) {
+ terror("need key");
+ return;
+ }
+
+ if ((data == NULL) || (datalen == 0)) {
+ terror("need data");
+ return;
+ }
+
+ key.dptr = (unsigned char *)keyname;
+ key.dsize = keylen;
+ dbuf.dptr = (unsigned char *)data;
+ dbuf.dsize = datalen;
+
+ printf("Storing key:\n");
+ print_rec(tdb, key, dbuf, NULL);
+
+ if (tdb_store(tdb, key, dbuf, TDB_REPLACE) != 0) {
+ terror("store failed");
+ }
+}
+
+static void show_tdb(char *keyname, size_t keylen)
+{
+ TDB_DATA key, dbuf;
+
+ if ((keyname == NULL) || (keylen == 0)) {
+ terror("need key");
+ return;
+ }
+
+ key.dptr = (unsigned char *)keyname;
+ key.dsize = keylen;
+
+ dbuf = tdb_fetch(tdb, key);
+ if (!dbuf.dptr) {
+ terror("fetch failed");
+ return;
+ }
+
+ print_rec(tdb, key, dbuf, NULL);
+
+ free( dbuf.dptr );
+
+ return;
+}
+
+static void delete_tdb(char *keyname, size_t keylen)
+{
+ TDB_DATA key;
+
+ if ((keyname == NULL) || (keylen == 0)) {
+ terror("need key");
+ return;
+ }
+
+ key.dptr = (unsigned char *)keyname;
+ key.dsize = keylen;
+
+ if (tdb_delete(tdb, key) != 0) {
+ terror("delete failed");
+ }
+}
+
+static void move_rec(char *keyname, size_t keylen, char* tdbname)
+{
+ TDB_DATA key, dbuf;
+ TDB_CONTEXT *dst_tdb;
+
+ if ((keyname == NULL) || (keylen == 0)) {
+ terror("need key");
+ return;
+ }
+
+ if ( !tdbname ) {
+ terror("need destination tdb name");
+ return;
+ }
+
+ key.dptr = (unsigned char *)keyname;
+ key.dsize = keylen;
+
+ dbuf = tdb_fetch(tdb, key);
+ if (!dbuf.dptr) {
+ terror("fetch failed");
+ return;
+ }
+
+ print_rec(tdb, key, dbuf, NULL);
+
+ dst_tdb = tdb_open(tdbname, 0, 0, O_RDWR, 0600);
+ if ( !dst_tdb ) {
+ terror("unable to open destination tdb");
+ return;
+ }
+
+ if (tdb_store( dst_tdb, key, dbuf, TDB_REPLACE ) != 0) {
+ terror("failed to move record");
+ }
+ else
+ printf("record moved\n");
+
+ tdb_close( dst_tdb );
+
+ return;
+}
+
+static int print_rec(TDB_CONTEXT *the_tdb, TDB_DATA key, TDB_DATA dbuf, void *state)
+{
+ printf("\nkey %d bytes\n", (int)key.dsize);
+ print_asc((const char *)key.dptr, key.dsize);
+ printf("\ndata %d bytes\n", (int)dbuf.dsize);
+ print_data((const char *)dbuf.dptr, dbuf.dsize);
+ return 0;
+}
+
+static int print_key(TDB_CONTEXT *the_tdb, TDB_DATA key, TDB_DATA dbuf, void *state)
+{
+ printf("key %d bytes: ", (int)key.dsize);
+ print_asc((const char *)key.dptr, key.dsize);
+ printf("\n");
+ return 0;
+}
+
+static int print_hexkey(TDB_CONTEXT *the_tdb, TDB_DATA key, TDB_DATA dbuf, void *state)
+{
+ printf("key %d bytes\n", (int)key.dsize);
+ print_data((const char *)key.dptr, key.dsize);
+ printf("\n");
+ return 0;
+}
+
+static int total_bytes;
+
+static int traverse_fn(TDB_CONTEXT *the_tdb, TDB_DATA key, TDB_DATA dbuf, void *state)
+{
+ total_bytes += dbuf.dsize;
+ return 0;
+}
+
+static void info_tdb(void)
+{
+ char *summary = tdb_summary(tdb);
+
+ if (!summary) {
+ printf("Error = %s\n", tdb_errorstr(tdb));
+ } else {
+ printf("%s", summary);
+ free(summary);
+ }
+}
+
+static void speed_tdb(const char *tlimit)
+{
+ const char *str = "store test", *str2 = "transaction test";
+ unsigned timelimit = tlimit?atoi(tlimit):0;
+ double t;
+ int ops;
+ if (timelimit == 0) timelimit = 5;
+
+ ops = 0;
+ printf("Testing store speed for %u seconds\n", timelimit);
+ _start_timer();
+ do {
+ long int r = random();
+ TDB_DATA key, dbuf;
+ key.dptr = discard_const_p(uint8_t, str);
+ key.dsize = strlen((char *)key.dptr);
+ dbuf.dptr = (uint8_t *) &r;
+ dbuf.dsize = sizeof(r);
+ tdb_store(tdb, key, dbuf, TDB_REPLACE);
+ t = _end_timer();
+ ops++;
+ } while (t < timelimit);
+ printf("%10.3f ops/sec\n", ops/t);
+
+ ops = 0;
+ printf("Testing fetch speed for %u seconds\n", timelimit);
+ _start_timer();
+ do {
+ TDB_DATA key;
+ key.dptr = discard_const_p(uint8_t, str);
+ key.dsize = strlen((char *)key.dptr);
+ tdb_fetch(tdb, key);
+ t = _end_timer();
+ ops++;
+ } while (t < timelimit);
+ printf("%10.3f ops/sec\n", ops/t);
+
+ ops = 0;
+ printf("Testing transaction speed for %u seconds\n", timelimit);
+ _start_timer();
+ do {
+ long int r = random();
+ TDB_DATA key, dbuf;
+ key.dptr = discard_const_p(uint8_t, str2);
+ key.dsize = strlen((char *)key.dptr);
+ dbuf.dptr = (uint8_t *) &r;
+ dbuf.dsize = sizeof(r);
+ tdb_transaction_start(tdb);
+ tdb_store(tdb, key, dbuf, TDB_REPLACE);
+ tdb_transaction_commit(tdb);
+ t = _end_timer();
+ ops++;
+ } while (t < timelimit);
+ printf("%10.3f ops/sec\n", ops/t);
+
+ ops = 0;
+ printf("Testing traverse speed for %u seconds\n", timelimit);
+ _start_timer();
+ do {
+ tdb_traverse(tdb, traverse_fn, NULL);
+ t = _end_timer();
+ ops++;
+ } while (t < timelimit);
+ printf("%10.3f ops/sec\n", ops/t);
+}
+
+static void toggle_mmap(void)
+{
+ disable_mmap = !disable_mmap;
+ if (disable_mmap) {
+ printf("mmap is disabled\n");
+ } else {
+ printf("mmap is enabled\n");
+ }
+}
+
+static char *tdb_getline(const char *prompt)
+{
+ static char thisline[1024];
+ char *p;
+ fputs(prompt, stdout);
+ thisline[0] = 0;
+ p = fgets(thisline, sizeof(thisline)-1, stdin);
+ if (p) p = strchr(p, '\n');
+ if (p) *p = 0;
+ return p?thisline:NULL;
+}
+
+static int do_delete_fn(TDB_CONTEXT *the_tdb, TDB_DATA key, TDB_DATA dbuf,
+ void *state)
+{
+ return tdb_delete(the_tdb, key);
+}
+
+static void first_record(TDB_CONTEXT *the_tdb, TDB_DATA *pkey)
+{
+ TDB_DATA dbuf;
+ *pkey = tdb_firstkey(the_tdb);
+
+ dbuf = tdb_fetch(the_tdb, *pkey);
+ if (!dbuf.dptr) terror("fetch failed");
+ else {
+ print_rec(the_tdb, *pkey, dbuf, NULL);
+ }
+}
+
+static void next_record(TDB_CONTEXT *the_tdb, TDB_DATA *pkey)
+{
+ TDB_DATA dbuf;
+ *pkey = tdb_nextkey(the_tdb, *pkey);
+
+ dbuf = tdb_fetch(the_tdb, *pkey);
+ if (!dbuf.dptr)
+ terror("fetch failed");
+ else
+ print_rec(the_tdb, *pkey, dbuf, NULL);
+}
+
+static int count(TDB_DATA key, TDB_DATA data, void *private_data)
+{
+ (*(unsigned int *)private_data)++;
+ return 0;
+}
+
+static void check_db(TDB_CONTEXT *the_tdb)
+{
+ int tdbcount = 0;
+ if (!the_tdb)
+ printf("Error: No database opened!\n");
+ else if (tdb_check(the_tdb, count, &tdbcount) == -1)
+ printf("Integrity check for the opened database failed.\n");
+ else
+ printf("Database integrity is OK and has %d records.\n",
+ tdbcount);
+}
+
+static int do_command(void)
+{
+ COMMAND_TABLE *ctp = cmd_table;
+ enum commands mycmd = CMD_HELP;
+ int cmd_len;
+
+ if (cmdname && strlen(cmdname) == 0) {
+ mycmd = CMD_NEXT;
+ } else {
+ while (ctp->name) {
+ cmd_len = strlen(ctp->name);
+ if (strncmp(ctp->name,cmdname,cmd_len) == 0) {
+ mycmd = ctp->cmd;
+ break;
+ }
+ ctp++;
+ }
+ }
+
+ switch (mycmd) {
+ case CMD_CREATE_TDB:
+ bIterate = 0;
+ create_tdb(arg1);
+ return 0;
+ case CMD_OPEN_TDB:
+ bIterate = 0;
+ open_tdb(arg1);
+ return 0;
+ case CMD_SYSTEM:
+ /* Shell command */
+ if (system(arg1) == -1) {
+ terror("system() call failed\n");
+ }
+ return 0;
+ case CMD_QUIT:
+ return 1;
+ default:
+		/* all the rest require an open database */
+ if (!tdb) {
+ bIterate = 0;
+ terror("database not open");
+ help();
+ return 0;
+ }
+ switch (mycmd) {
+ case CMD_TRANSACTION_START:
+ bIterate = 0;
+ tdb_transaction_start(tdb);
+ return 0;
+ case CMD_TRANSACTION_COMMIT:
+ bIterate = 0;
+ tdb_transaction_commit(tdb);
+ return 0;
+ case CMD_REPACK:
+ bIterate = 0;
+ tdb_repack(tdb);
+ return 0;
+ case CMD_TRANSACTION_CANCEL:
+ bIterate = 0;
+ tdb_transaction_cancel(tdb);
+ return 0;
+ case CMD_ERASE:
+ bIterate = 0;
+ tdb_traverse(tdb, do_delete_fn, NULL);
+ return 0;
+ case CMD_DUMP:
+ bIterate = 0;
+ tdb_traverse(tdb, print_rec, NULL);
+ return 0;
+ case CMD_INSERT:
+ bIterate = 0;
+ insert_tdb(arg1, arg1len,arg2,arg2len);
+ return 0;
+ case CMD_MOVE:
+ bIterate = 0;
+ move_rec(arg1,arg1len,arg2);
+ return 0;
+ case CMD_STORE:
+ bIterate = 0;
+ store_tdb(arg1,arg1len,arg2,arg2len);
+ return 0;
+ case CMD_SHOW:
+ bIterate = 0;
+ show_tdb(arg1, arg1len);
+ return 0;
+ case CMD_KEYS:
+ tdb_traverse(tdb, print_key, NULL);
+ return 0;
+ case CMD_HEXKEYS:
+ tdb_traverse(tdb, print_hexkey, NULL);
+ return 0;
+ case CMD_DELETE:
+ bIterate = 0;
+ delete_tdb(arg1,arg1len);
+ return 0;
+ case CMD_LIST_HASH_FREE:
+ tdb_dump_all(tdb);
+ return 0;
+ case CMD_LIST_FREE:
+ tdb_printfreelist(tdb);
+ return 0;
+ case CMD_FREELIST_SIZE: {
+ int size;
+
+ size = tdb_freelist_size(tdb);
+ if (size < 0) {
+ printf("Error getting freelist size.\n");
+ } else {
+ printf("freelist size: %d\n", size);
+ }
+
+ return 0;
+ }
+ case CMD_INFO:
+ info_tdb();
+ return 0;
+ case CMD_SPEED:
+ speed_tdb(arg1);
+ return 0;
+ case CMD_MMAP:
+ toggle_mmap();
+ return 0;
+ case CMD_FIRST:
+ bIterate = 1;
+ first_record(tdb, &iterate_kbuf);
+ return 0;
+ case CMD_NEXT:
+ if (bIterate)
+ next_record(tdb, &iterate_kbuf);
+ return 0;
+ case CMD_CHECK:
+ check_db(tdb);
+ return 0;
+ case CMD_HELP:
+ help();
+ return 0;
+ case CMD_CREATE_TDB:
+ case CMD_OPEN_TDB:
+ case CMD_SYSTEM:
+ case CMD_QUIT:
+ /*
+ * unhandled commands. cases included here to avoid compiler
+ * warnings.
+ */
+ return 0;
+ }
+ }
+
+ return 0;
+}
+
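+/*
+  Editorial note: tdb_convert_string() rewrites the string in place,
+  turning backslash hex escapes into raw bytes -- e.g. "\41" becomes
+  'A' -- and returns the resulting length via sizep, so keys and data
+  typed at the prompt can contain arbitrary binary values.
+ */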
+static char *tdb_convert_string(char *instring, size_t *sizep)
+{
+ size_t length = 0;
+ char *outp, *inp;
+ char temp[3];
+
+ outp = inp = instring;
+
+ while (*inp) {
+ if (*inp == '\\') {
+ inp++;
+ if (*inp && strchr("0123456789abcdefABCDEF",(int)*inp)) {
+ temp[0] = *inp++;
+ temp[1] = '\0';
+ if (*inp && strchr("0123456789abcdefABCDEF",(int)*inp)) {
+ temp[1] = *inp++;
+ temp[2] = '\0';
+ }
+ *outp++ = (char)strtol((const char *)temp,NULL,16);
+ } else {
+ *outp++ = *inp++;
+ }
+ } else {
+ *outp++ = *inp++;
+ }
+ length++;
+ }
+ *sizep = length;
+ return instring;
+}
+
+int main(int argc, char *argv[])
+{
+ cmdname = "";
+ arg1 = NULL;
+ arg1len = 0;
+ arg2 = NULL;
+ arg2len = 0;
+
+ if (argv[1] && (strcmp(argv[1], "-l") == 0)) {
+ disable_lock = 1;
+ argv[1] = argv[0];
+ argv += 1;
+ argc -= 1;
+ }
+
+ if (argv[1]) {
+ cmdname = "open";
+ arg1 = argv[1];
+ do_command();
+ cmdname = "";
+ arg1 = NULL;
+ }
+
+ switch (argc) {
+ case 1:
+ case 2:
+ /* Interactive mode */
+ while ((cmdname = tdb_getline("tdb> "))) {
+ arg2 = arg1 = NULL;
+ if ((arg1 = strchr((const char *)cmdname,' ')) != NULL) {
+ arg1++;
+ arg2 = arg1;
+ while (*arg2) {
+ if (*arg2 == ' ') {
+ *arg2++ = '\0';
+ break;
+ }
+ if ((*arg2++ == '\\') && (*arg2 == ' ')) {
+ arg2++;
+ }
+ }
+ }
+ if (arg1) arg1 = tdb_convert_string(arg1,&arg1len);
+ if (arg2) arg2 = tdb_convert_string(arg2,&arg2len);
+ if (do_command()) break;
+ }
+ break;
+ case 5:
+ arg2 = tdb_convert_string(argv[4],&arg2len);
+ case 4:
+ arg1 = tdb_convert_string(argv[3],&arg1len);
+ case 3:
+ cmdname = argv[2];
+ default:
+ do_command();
+ break;
+ }
+
+ if (tdb) tdb_close(tdb);
+
+ return 0;
+}
diff --git a/tools/tdbtorture.c b/tools/tdbtorture.c
new file mode 100644
index 0000000..e4b8f69
--- /dev/null
+++ b/tools/tdbtorture.c
@@ -0,0 +1,482 @@
+/* this tests tdb by doing lots of ops from several simultaneous
+ writers - that stresses the locking code.
+*/
+
+#include "replace.h"
+#include "system/time.h"
+#include "system/wait.h"
+#include "system/filesys.h"
+#include "tdb.h"
+
+#ifdef HAVE_GETOPT_H
+#include <getopt.h>
+#endif
+
+
+#define REOPEN_PROB 30
+#define DELETE_PROB 8
+#define STORE_PROB 4
+#define APPEND_PROB 6
+#define TRANSACTION_PROB 10
+#define TRANSACTION_PREPARE_PROB 2
+#define LOCKSTORE_PROB 5
+#define TRAVERSE_PROB 20
+#define TRAVERSE_READ_PROB 20
+#define CULL_PROB 100
+#define KEYLEN 3
+#define DATALEN 100
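+
+/*
+  Editorial note: each *_PROB value P gives the operation a chance of
+  roughly 1 in P per loop in addrec_db() (random() % P == 0); setting
+  a define to 0 compiles the corresponding block out via its #if
+  guard. KEYLEN and DATALEN bound the random key and record sizes.
+ */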
+
+static struct tdb_context *db;
+static int in_transaction;
+static int error_count;
+static int always_transaction = 0;
+static int hash_size = 2;
+static int loopnum;
+static int count_pipe;
+static bool mutex = false;
+static struct tdb_logging_context log_ctx;
+
+#ifdef PRINTF_ATTRIBUTE
+static void tdb_log(struct tdb_context *tdb, enum tdb_debug_level level, const char *format, ...) PRINTF_ATTRIBUTE(3,4);
+#endif
+static void tdb_log(struct tdb_context *tdb, enum tdb_debug_level level, const char *format, ...)
+{
+ va_list ap;
+
+ /* trace level messages do not indicate an error */
+ if (level != TDB_DEBUG_TRACE) {
+ error_count++;
+ }
+
+ va_start(ap, format);
+ vfprintf(stdout, format, ap);
+ va_end(ap);
+ fflush(stdout);
+#if 0
+ if (level != TDB_DEBUG_TRACE) {
+ char *ptr;
+ signal(SIGUSR1, SIG_IGN);
+ asprintf(&ptr,"xterm -e gdb /proc/%d/exe %d", getpid(), getpid());
+ system(ptr);
+ free(ptr);
+ }
+#endif
+}
+
+static void fatal(const char *why)
+{
+ perror(why);
+ error_count++;
+}
+
+static char *randbuf(int len)
+{
+ char *buf;
+ int i;
+ buf = (char *)malloc(len+1);
+
+ for (i=0;i<len;i++) {
+ buf[i] = 'a' + (rand() % 26);
+ }
+ buf[i] = 0;
+ return buf;
+}
+
+static int cull_traverse(struct tdb_context *tdb, TDB_DATA key, TDB_DATA dbuf,
+ void *state)
+{
+#if CULL_PROB
+ if (random() % CULL_PROB == 0) {
+ tdb_delete(tdb, key);
+ }
+#endif
+ return 0;
+}
+
+static void addrec_db(void)
+{
+ int klen, dlen;
+ char *k, *d;
+ TDB_DATA key, data;
+
+ klen = 1 + (rand() % KEYLEN);
+ dlen = 1 + (rand() % DATALEN);
+
+ k = randbuf(klen);
+ d = randbuf(dlen);
+
+ key.dptr = (unsigned char *)k;
+ key.dsize = klen+1;
+
+ data.dptr = (unsigned char *)d;
+ data.dsize = dlen+1;
+
+#if REOPEN_PROB
+ if (in_transaction == 0 && random() % REOPEN_PROB == 0) {
+ tdb_reopen_all(0);
+ goto next;
+ }
+#endif
+
+#if TRANSACTION_PROB
+ if (in_transaction == 0 &&
+ (always_transaction || random() % TRANSACTION_PROB == 0)) {
+ if (tdb_transaction_start(db) != 0) {
+ fatal("tdb_transaction_start failed");
+ }
+ in_transaction++;
+ goto next;
+ }
+ if (in_transaction && random() % TRANSACTION_PROB == 0) {
+ if (random() % TRANSACTION_PREPARE_PROB == 0) {
+ if (tdb_transaction_prepare_commit(db) != 0) {
+ fatal("tdb_transaction_prepare_commit failed");
+ }
+ }
+ if (tdb_transaction_commit(db) != 0) {
+ fatal("tdb_transaction_commit failed");
+ }
+ in_transaction--;
+ goto next;
+ }
+ if (in_transaction && random() % TRANSACTION_PROB == 0) {
+ if (tdb_transaction_cancel(db) != 0) {
+ fatal("tdb_transaction_cancel failed");
+ }
+ in_transaction--;
+ goto next;
+ }
+#endif
+
+#if DELETE_PROB
+ if (random() % DELETE_PROB == 0) {
+ tdb_delete(db, key);
+ goto next;
+ }
+#endif
+
+#if STORE_PROB
+ if (random() % STORE_PROB == 0) {
+ if (tdb_store(db, key, data, TDB_REPLACE) != 0) {
+ fatal("tdb_store failed");
+ }
+ goto next;
+ }
+#endif
+
+#if APPEND_PROB
+ if (random() % APPEND_PROB == 0) {
+ if (tdb_append(db, key, data) != 0) {
+ fatal("tdb_append failed");
+ }
+ goto next;
+ }
+#endif
+
+#if LOCKSTORE_PROB
+ if (random() % LOCKSTORE_PROB == 0) {
+ tdb_chainlock(db, key);
+ data = tdb_fetch(db, key);
+ if (tdb_store(db, key, data, TDB_REPLACE) != 0) {
+ fatal("tdb_store failed");
+ }
+ if (data.dptr) free(data.dptr);
+ tdb_chainunlock(db, key);
+ goto next;
+ }
+#endif
+
+#if TRAVERSE_PROB
+ if (random() % TRAVERSE_PROB == 0) {
+ tdb_traverse(db, cull_traverse, NULL);
+ goto next;
+ }
+#endif
+
+#if TRAVERSE_READ_PROB
+ if (random() % TRAVERSE_READ_PROB == 0) {
+ tdb_traverse_read(db, NULL, NULL);
+ goto next;
+ }
+#endif
+
+ data = tdb_fetch(db, key);
+ if (data.dptr) free(data.dptr);
+
+next:
+ free(k);
+ free(d);
+}
+
+static int traverse_fn(struct tdb_context *tdb, TDB_DATA key, TDB_DATA dbuf,
+ void *state)
+{
+ tdb_delete(tdb, key);
+ return 0;
+}
+
+static void usage(void)
+{
+ printf("Usage: tdbtorture [-t] [-k] [-m] [-n NUM_PROCS] [-l NUM_LOOPS] [-s SEED] [-H HASH_SIZE]\n");
+ exit(0);
+}
+
+static void send_count_and_suicide(int sig)
+{
+ /* This ensures our successor can continue where we left off. */
+ write(count_pipe, &loopnum, sizeof(loopnum));
+ /* This gives a unique signature. */
+ kill(getpid(), SIGUSR2);
+}
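+
+/*
+  Editorial note on the -k (kill_random) protocol: the parent sends
+  SIGUSR1 to a random child; the child's handler writes its loop
+  counter into the pipe and raises SIGUSR2, and the parent, seeing a
+  SIGUSR2 exit, reads that counter and forks a replacement child that
+  resumes from the same loop number.
+ */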
+
+static int run_child(const char *filename, int i, int seed, unsigned num_loops, unsigned start)
+{
+ int tdb_flags = TDB_DEFAULT|TDB_CLEAR_IF_FIRST|TDB_INCOMPATIBLE_HASH;
+
+ if (mutex) {
+ tdb_flags |= TDB_MUTEX_LOCKING;
+ }
+
+ db = tdb_open_ex(filename, hash_size, tdb_flags,
+ O_RDWR | O_CREAT, 0600, &log_ctx, NULL);
+ if (!db) {
+ fatal("db open failed");
+ }
+
+ srand(seed + i);
+ srandom(seed + i);
+
+ /* Set global, then we're ready to handle being killed. */
+ loopnum = start;
+ signal(SIGUSR1, send_count_and_suicide);
+
+ for (;loopnum<num_loops && error_count == 0;loopnum++) {
+ addrec_db();
+ }
+
+ if (error_count == 0) {
+ tdb_traverse_read(db, NULL, NULL);
+ if (always_transaction) {
+ while (in_transaction) {
+ tdb_transaction_cancel(db);
+ in_transaction--;
+ }
+ if (tdb_transaction_start(db) != 0)
+ fatal("tdb_transaction_start failed");
+ }
+ tdb_traverse(db, traverse_fn, NULL);
+ tdb_traverse(db, traverse_fn, NULL);
+ if (always_transaction) {
+ if (tdb_transaction_commit(db) != 0)
+ fatal("tdb_transaction_commit failed");
+ }
+ }
+
+ tdb_close(db);
+
+ return (error_count < 100 ? error_count : 100);
+}
+
+static char *test_path(const char *filename)
+{
+ const char *prefix = getenv("TEST_DATA_PREFIX");
+
+ if (prefix) {
+ char *path = NULL;
+ int ret;
+
+ ret = asprintf(&path, "%s/%s", prefix, filename);
+ if (ret == -1) {
+ return NULL;
+ }
+ return path;
+ }
+
+ return strdup(filename);
+}
+
+int main(int argc, char * const *argv)
+{
+ int i, seed = -1;
+ int num_loops = 5000;
+ int num_procs = 3;
+ int c, pfds[2];
+ extern char *optarg;
+ pid_t *pids;
+ int kill_random = 0;
+ int *done;
+ char *test_tdb;
+
+ log_ctx.log_fn = tdb_log;
+
+ while ((c = getopt(argc, argv, "n:l:s:H:thkm")) != -1) {
+ switch (c) {
+ case 'n':
+ num_procs = strtol(optarg, NULL, 0);
+ break;
+ case 'l':
+ num_loops = strtol(optarg, NULL, 0);
+ break;
+ case 'H':
+ hash_size = strtol(optarg, NULL, 0);
+ break;
+ case 's':
+ seed = strtol(optarg, NULL, 0);
+ break;
+ case 't':
+ always_transaction = 1;
+ break;
+ case 'k':
+ kill_random = 1;
+ break;
+ case 'm':
+ mutex = tdb_runtime_check_for_robust_mutexes();
+ if (!mutex) {
+ printf("tdb_runtime_check_for_robust_mutexes() returned false\n");
+ exit(1);
+ }
+ break;
+ default:
+ usage();
+ }
+ }
+
+ test_tdb = test_path("torture.tdb");
+
+ unlink(test_tdb);
+
+ if (seed == -1) {
+ seed = (getpid() + time(NULL)) & 0x7FFFFFFF;
+ }
+
+ printf("Testing with %d processes, %d loops, %d hash_size, seed=%d%s\n",
+ num_procs, num_loops, hash_size, seed,
+ (always_transaction ? " (all within transactions)" : ""));
+
+ if (num_procs == 1 && !kill_random) {
+ /* Don't fork for this case, makes debugging easier. */
+ error_count = run_child(test_tdb, 0, seed, num_loops, 0);
+ goto done;
+ }
+
+ pids = (pid_t *)calloc(sizeof(pid_t), num_procs);
+ if (pids == NULL) {
+ perror("Unable to allocate memory for pids");
+ exit(1);
+ }
+ done = (int *)calloc(sizeof(int), num_procs);
+ if (done == NULL) {
+ perror("Unable to allocate memory for done");
+ exit(1);
+ }
+
+ if (pipe(pfds) != 0) {
+ perror("Creating pipe");
+ exit(1);
+ }
+ count_pipe = pfds[1];
+
+ for (i=0;i<num_procs;i++) {
+ if ((pids[i]=fork()) == 0) {
+ close(pfds[0]);
+ exit(run_child(test_tdb, i, seed, num_loops, 0));
+ }
+ }
+
+ while (num_procs) {
+ int status, j;
+ pid_t pid;
+
+ if (error_count != 0) {
+ /* try and stop the test on any failure */
+ for (j=0;j<num_procs;j++) {
+ if (pids[j] != 0) {
+ kill(pids[j], SIGTERM);
+ }
+ }
+ }
+
+ pid = waitpid(-1, &status, kill_random ? WNOHANG : 0);
+ if (pid == 0) {
+ struct timeval tv;
+
+ /* Sleep for 1/10 second. */
+ tv.tv_sec = 0;
+ tv.tv_usec = 100000;
+ select(0, NULL, NULL, NULL, &tv);
+
+ /* Kill someone. */
+ kill(pids[random() % num_procs], SIGUSR1);
+ continue;
+ }
+
+ if (pid == -1) {
+ perror("failed to wait for child\n");
+ exit(1);
+ }
+
+ for (j=0;j<num_procs;j++) {
+ if (pids[j] == pid) break;
+ }
+ if (j == num_procs) {
+ printf("unknown child %d exited!?\n", (int)pid);
+ exit(1);
+ }
+ if (WIFSIGNALED(status)) {
+ if (WTERMSIG(status) == SIGUSR2
+ || WTERMSIG(status) == SIGUSR1) {
+ /* SIGUSR2 means they wrote to pipe. */
+ if (WTERMSIG(status) == SIGUSR2) {
+ read(pfds[0], &done[j],
+ sizeof(done[j]));
+ }
+ pids[j] = fork();
+ if (pids[j] == 0)
+ exit(run_child(test_tdb, j, seed,
+ num_loops, done[j]));
+ printf("Restarting child %i for %u-%u\n",
+ j, done[j], num_loops);
+ continue;
+ }
+ printf("child %d exited with signal %d\n",
+ (int)pid, WTERMSIG(status));
+ error_count++;
+ } else {
+ if (WEXITSTATUS(status) != 0) {
+ printf("child %d exited with status %d\n",
+ (int)pid, WEXITSTATUS(status));
+ error_count++;
+ }
+ }
+ memmove(&pids[j], &pids[j+1],
+ (num_procs - j - 1)*sizeof(pids[0]));
+ num_procs--;
+ }
+
+ free(pids);
+
+done:
+ if (error_count == 0) {
+ int tdb_flags = TDB_DEFAULT;
+
+ if (mutex) {
+ tdb_flags |= TDB_NOLOCK;
+ }
+
+ db = tdb_open_ex(test_tdb, hash_size, tdb_flags,
+ O_RDWR, 0, &log_ctx, NULL);
+ if (!db) {
+ fatal("db open failed\n");
+ exit(1);
+ }
+ if (tdb_check(db, NULL, NULL) == -1) {
+ printf("db check failed\n");
+ exit(1);
+ }
+ tdb_close(db);
+ printf("OK\n");
+ }
+
+ free(test_tdb);
+ return error_count;
+}
diff --git a/web/index.html b/web/index.html
new file mode 100644
index 0000000..99e8a2f
--- /dev/null
+++ b/web/index.html
@@ -0,0 +1,48 @@
+<!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 3.2//EN">
+<HTML>
+<HEAD>
+<TITLE>tdb</TITLE>
+</HEAD>
+<BODY BGCOLOR="#ffffff" TEXT="#000000" VLINK="#292555" LINK="#292555" ALINK="#cc0033">
+
+<h1>tdb</h1>
+
+TDB is a Trivial Database. In concept, it is very much like GDBM and BSD's DB,
+except that it allows multiple simultaneous writers and uses locking
+internally to keep writers from trampling on each other. TDB is also extremely
+small.
+
+<h2>Download</h2>
+You can download the latest releases of tdb from the <a
+href="http://samba.org/ftp/tdb">tdb directory</a> on the samba public
+source archive.
+
+
+<h2>Discussion and bug reports</h2>
+
+tdb does not currently have its own mailing list or bug tracking
+system. For now, please use the <a
+href="https://lists.samba.org/mailman/listinfo/samba-technical">samba-technical</a>
+mailing list, and the <a href="http://bugzilla.samba.org/">Samba
+bugzilla</a> bug tracking system.
+
+<h2>Getting the latest source</h2>
+
+You can download the latest code either via git or rsync.<br>
+<br>
+To fetch via git see the following guide:<br>
+<a href="http://wiki.samba.org/index.php/Using_Git_for_Samba_Development">Using Git for Samba Development</a><br>
+Once you have cloned the tree switch to the master branch and cd into the source/lib/tdb directory.<br>
+<br>
+To fetch via rsync use these commands:
+
+<pre>
+ rsync -Pavz samba.org::ftp/unpacked/standalone_projects/lib/tdb .
+ rsync -Pavz samba.org::ftp/unpacked/standalone_projects/lib/replace .
+</pre>
+
+and build in tdb. It will find the replace library in the directory
+above automatically.
+
+</BODY>
+</HTML>
diff --git a/wscript b/wscript
new file mode 100644
index 0000000..b960bb9
--- /dev/null
+++ b/wscript
@@ -0,0 +1,243 @@
+#!/usr/bin/env python
+
+APPNAME = 'tdb'
+VERSION = '1.3.5'
+
+blddir = 'bin'
+
+import sys, os
+
+# find the buildtools directory
+srcdir = '.'
+while not os.path.exists(srcdir+'/buildtools') and len(srcdir.split('/')) < 5:
+ srcdir = srcdir + '/..'
+sys.path.insert(0, srcdir + '/buildtools/wafsamba')
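+# Editorial note: the loop above climbs at most four directory levels
+# looking for buildtools/, so the same wscript works both standalone
+# and when tdb is embedded deeper inside the Samba tree.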
+
+import wafsamba, samba_dist, Options, Logs
+
+samba_dist.DIST_DIRS('lib/tdb:. lib/replace:lib/replace buildtools:buildtools third_party/waf:third_party/waf')
+
+tdb1_unit_tests = [
+ 'run-3G-file',
+ 'run-bad-tdb-header',
+ 'run',
+ 'run-check',
+ 'run-corrupt',
+ 'run-die-during-transaction',
+ 'run-endian',
+ 'run-incompatible',
+ 'run-nested-transactions',
+ 'run-nested-traverse',
+ 'run-no-lock-during-traverse',
+ 'run-oldhash',
+ 'run-open-during-transaction',
+ 'run-readonly-check',
+ 'run-rescue',
+ 'run-rescue-find_entry',
+ 'run-rwlock-check',
+ 'run-summary',
+ 'run-transaction-expand',
+ 'run-traverse-in-transaction',
+ 'run-wronghash-fail',
+ 'run-zero-append',
+ 'run-marklock-deadlock',
+ 'run-mutex-openflags2',
+ 'run-mutex-trylock',
+ 'run-mutex-allrecord-bench',
+ 'run-mutex-allrecord-trylock',
+ 'run-mutex-allrecord-block',
+ 'run-mutex-transaction1',
+ 'run-mutex-die',
+ 'run-mutex1',
+]
+
+def set_options(opt):
+ opt.BUILTIN_DEFAULT('replace')
+ opt.PRIVATE_EXTENSION_DEFAULT('tdb', noextension='tdb')
+ opt.RECURSE('lib/replace')
+ opt.add_option('--disable-tdb-mutex-locking',
+ help=("Disable the use of pthread robust mutexes"),
+ action="store_true", dest='disable_tdb_mutex_locking',
+ default=False)
+ if opt.IN_LAUNCH_DIR():
+ opt.add_option('--disable-python',
+ help=("disable the pytdb module"),
+ action="store_true", dest='disable_python', default=False)
+
+
+def configure(conf):
+ conf.env.disable_tdb_mutex_locking = getattr(Options.options,
+ 'disable_tdb_mutex_locking',
+ False)
+ if not conf.env.disable_tdb_mutex_locking:
+ conf.env.replace_add_global_pthread = True
+ conf.RECURSE('lib/replace')
+
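+    # tdb is "standalone" when it is the top-level project being built;
+    # when bundled inside Samba, a suitable system tdb may be used instead.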
+ conf.env.standalone_tdb = conf.IN_LAUNCH_DIR()
+ conf.env.building_tdb = True
+
+ if not conf.env.standalone_tdb:
+ if conf.CHECK_BUNDLED_SYSTEM_PKG('tdb', minversion=VERSION,
+ implied_deps='replace'):
+ conf.define('USING_SYSTEM_TDB', 1)
+ conf.env.building_tdb = False
+ if conf.CHECK_BUNDLED_SYSTEM_PYTHON('pytdb', 'tdb', minversion=VERSION):
+ conf.define('USING_SYSTEM_PYTDB', 1)
+
+ conf.env.disable_python = getattr(Options.options, 'disable_python', False)
+
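+    # Mutex locking requires robust pthread mutexes and only applies when
+    # we are building the bundled tdb and it was not disabled above.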
+ if (conf.CONFIG_SET('HAVE_ROBUST_MUTEXES') and
+ conf.env.building_tdb and
+ not conf.env.disable_tdb_mutex_locking):
+ conf.define('USE_TDB_MUTEX_LOCKING', 1)
+
+ conf.CHECK_XSLTPROC_MANPAGES()
+
+ if not conf.env.disable_python:
+ # also disable if we don't have the python libs installed
+ conf.find_program('python', var='PYTHON')
+ conf.check_tool('python')
+ conf.check_python_version((2,4,2))
+ conf.SAMBA_CHECK_PYTHON_HEADERS(mandatory=False)
+ if not conf.env.HAVE_PYTHON_H:
+ Logs.warn('Disabling pytdb as python devel libs not found')
+ conf.env.disable_python = True
+
+ conf.SAMBA_CONFIG_H()
+
+ conf.SAMBA_CHECK_UNDEFINED_SYMBOL_FLAGS()
+
+def build(bld):
+ bld.RECURSE('lib/replace')
+
+ COMMON_FILES='''check.c error.c tdb.c traverse.c
+ freelistcheck.c lock.c dump.c freelist.c
+ io.c open.c transaction.c hash.c summary.c rescue.c
+ mutex.c'''
+
+ COMMON_SRC = bld.SUBDIR('common', COMMON_FILES)
+
+ if bld.env.standalone_tdb:
+ bld.env.PKGCONFIGDIR = '${LIBDIR}/pkgconfig'
+ private_library = False
+ else:
+ private_library = True
+
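+    # Only build the bundled library and tools when configure did not select
+    # a system tdb.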
+ if not bld.CONFIG_SET('USING_SYSTEM_TDB'):
+
+ tdb_deps = 'replace'
+
+ if bld.CONFIG_SET('USE_TDB_MUTEX_LOCKING'):
+ tdb_deps += ' pthread'
+
+ bld.SAMBA_LIBRARY('tdb',
+ COMMON_SRC,
+ deps=tdb_deps,
+ includes='include',
+ abi_directory='ABI',
+ abi_match='tdb_*',
+ hide_symbols=True,
+ vnum=VERSION,
+ public_headers='include/tdb.h',
+ public_headers_install=not private_library,
+ pc_files='tdb.pc',
+ private_library=private_library)
+
+ bld.SAMBA_BINARY('tdbtorture',
+ 'tools/tdbtorture.c',
+ 'tdb',
+ install=False)
+
+ bld.SAMBA_BINARY('tdbrestore',
+ 'tools/tdbrestore.c',
+ 'tdb', manpages='man/tdbrestore.8')
+
+ bld.SAMBA_BINARY('tdbdump',
+ 'tools/tdbdump.c',
+ 'tdb', manpages='man/tdbdump.8')
+
+ bld.SAMBA_BINARY('tdbbackup',
+ 'tools/tdbbackup.c',
+ 'tdb',
+ manpages='man/tdbbackup.8')
+
+ bld.SAMBA_BINARY('tdbtool',
+ 'tools/tdbtool.c',
+ 'tdb', manpages='man/tdbtool.8')
+
+ if bld.env.standalone_tdb:
+        # FIXME: this hardcoded list is fragile and should not need to be
+        # maintained by hand.
+ bld.SAMBA_SUBSYSTEM('tdb-test-helpers',
+ 'test/external-agent.c test/lock-tracking.c test/logging.c',
+ tdb_deps,
+ includes='include')
+
+ for t in tdb1_unit_tests:
+ b = "tdb1-" + t
+ s = "test/" + t + ".c"
+ bld.SAMBA_BINARY(b, s, 'replace tdb-test-helpers',
+ includes='include', install=False)
+
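+    # Skip the Python bindings when a system pytdb is in use; they are also
+    # disabled when --disable-python was given or python headers are missing.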
+ if not bld.CONFIG_SET('USING_SYSTEM_PYTDB'):
+ bld.SAMBA_PYTHON('pytdb',
+ 'pytdb.c',
+ deps='tdb',
+ enabled=not bld.env.disable_python,
+ realname='tdb.so',
+ cflags='-DPACKAGE_VERSION=\"%s\"' % VERSION)
+
+def testonly(ctx):
+ '''run tdb testsuite'''
+ import Utils, samba_utils, shutil
+ ecode = 0
+
+ test_prefix = "%s/st" % (Utils.g_module.blddir)
+ shutil.rmtree(test_prefix, ignore_errors=True)
+ os.makedirs(test_prefix)
+ os.environ['TEST_DATA_PREFIX'] = test_prefix
+
+ env = samba_utils.LOAD_ENVIRONMENT()
+ # FIXME: This is horrible :(
+ if env.building_tdb:
+ # Create scratch directory for tests.
+ testdir = os.path.join(test_prefix, 'tdb-tests')
+ samba_utils.mkdir_p(testdir)
+ # Symlink back to source dir so it can find tests in test/
+ link = os.path.join(testdir, 'test')
+ if not os.path.exists(link):
+ os.symlink(os.path.abspath(os.path.join(env.cwd, 'test')), link)
+
+ for t in tdb1_unit_tests:
+ f = "tdb1-" + t
+ cmd = "cd " + testdir + " && " + os.path.abspath(os.path.join(Utils.g_module.blddir, f)) + " > test-output 2>&1"
+ print("..." + f)
+ ret = samba_utils.RUN_COMMAND(cmd)
+ if ret != 0:
+ print("%s failed:" % f)
+ samba_utils.RUN_COMMAND("cat " + os.path.join(testdir, 'test-output'))
+ ecode = ret
+ break
+
+ if ecode == 0:
+ cmd = os.path.join(Utils.g_module.blddir, 'tdbtorture')
+ ret = samba_utils.RUN_COMMAND(cmd)
+ print("testsuite returned %d" % ret)
+ if ret != 0:
+ ecode = ret
+ sys.exit(ecode)
+
+# WAF does not build the unit tests as part of 'testonly' (perhaps because
+# they do not link against tdb), so 'test' forces a build first.
+def test(ctx):
+ import Scripting
+ Scripting.commands.append('build')
+ Scripting.commands.append('testonly')
+
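+# A typical standalone developer invocation (a sketch; it assumes the waf
+# wrapper scripts from buildtools/scripts providing ./configure and the
+# Makefile are in place):
+#
+#   ./configure && make && make test
+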
+def dist():
+ '''makes a tarball for distribution'''
+ samba_dist.dist()
+
+def reconfigure(ctx):
+ '''reconfigure if config scripts have changed'''
+ import samba_utils
+ samba_utils.reconfigure(ctx)